Nov 25 16:47:04 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 16:47:04 crc restorecon[4763]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 16:47:04 crc restorecon[4763]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc 
restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 16:47:04 crc 
restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 
16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 16:47:04 crc 
restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:04 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 16:47:05 crc restorecon[4763]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 16:47:05 crc kubenswrapper[4812]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 16:47:05 crc kubenswrapper[4812]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 16:47:05 crc kubenswrapper[4812]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 16:47:05 crc kubenswrapper[4812]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
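The restorecon records above all share one shape: an absolute path under /var/lib/kubelet, the fixed phrase "not reset as customized by admin to", and a target SELinux context whose trailing category pair is the per-pod MCS label (s0:c7,c13 for the catalog pods, s0:c682,c947 for oauth-openshift, and so on); distinct category pairs are what keep one pod's files unreadable to another even though every path carries the same container_file_t type. A minimal Python sketch for tallying these records, assuming the journal text with one record per line as emitted; the file name is a stand-in:

import re
from collections import Counter

# Matches restorecon records of the form seen throughout this log:
#   restorecon[PID]: <path> not reset as customized by admin to <context>
RECORD = re.compile(
    r"restorecon\[\d+\]: (?P<path>/\S+) not reset as customized by admin to "
    r"(?P<context>\S+)"
)

def tally_contexts(log_text: str) -> Counter:
    """Count how many 'not reset' records target each SELinux context."""
    return Counter(m.group("context") for m in RECORD.finditer(log_text))

if __name__ == "__main__":
    # "kubelet.log" is a hypothetical local copy of this journal capture.
    with open("kubelet.log") as f:
        for context, count in tally_contexts(f.read()).most_common(10):
            print(f"{count:6d}  {context}")

Grouping by full context rather than by path makes the per-pod structure visible: each pod's volumes, etc-hosts, and container scratch directories collapse into one MCS bucket.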
Nov 25 16:47:05 crc kubenswrapper[4812]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 25 16:47:05 crc kubenswrapper[4812]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.595934 4812 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603630 4812 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603648 4812 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603654 4812 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603659 4812 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603663 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603667 4812 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603671 4812 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603675 4812 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603679 4812 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603682 4812 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603685 4812 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603689 4812 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603692 4812 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603696 4812 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603700 4812 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603704 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603708 4812 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603712 4812 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603717 4812 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603721 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603725 4812 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603729 4812 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603734 4812 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603738 4812 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603741 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603745 4812 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603748 4812 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603752 4812 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603755 4812 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603759 4812 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603762 4812 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603766 4812 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603770 4812 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603773 4812 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603777 4812 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603780 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603784 4812 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603787 4812 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603790 4812 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603794 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603797 4812 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603801 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603804 4812 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603807 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603811 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603814 4812 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603818 4812 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603822 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603826 4812 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603829 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603833 4812 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603836 4812 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603840 4812 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603843 4812 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603849 4812 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603853 4812 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603857 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603861 4812 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603865 4812 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603869 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603873 4812 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603878 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603881 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603885 4812 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603888 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603894 4812 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603899 4812 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603903 4812 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603906 4812 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603910 4812 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.603913 4812 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.603979 4812 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.603987 4812 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604001 4812 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604006 4812 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604014 4812 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604018 4812 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604024 4812 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604029 4812 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604034 4812 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604038 4812 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604043 4812 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604047 4812 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604051 4812 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604056 4812 flags.go:64] FLAG: --cgroup-root=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604060 4812 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604064 4812 flags.go:64] FLAG: --client-ca-file=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604069 4812 flags.go:64] FLAG: --cloud-config=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604073 4812 flags.go:64] FLAG: --cloud-provider=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604077 4812 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604083 4812 flags.go:64] FLAG: --cluster-domain=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604086 4812 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604091 4812 flags.go:64] FLAG: --config-dir=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604094 4812 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604099 4812 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604104 4812 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604109 4812 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604113 4812 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604118 4812 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604122 4812 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604126 4812 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604130 4812 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604135 4812 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604139 4812 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604145 4812 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604148 4812 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604153 4812 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604157 4812 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604161 4812 flags.go:64] FLAG: --enable-server="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604166 4812 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604171 4812 flags.go:64] FLAG: --event-burst="100"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604175 4812 flags.go:64] FLAG: --event-qps="50"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604179 4812 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604183 4812 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604187 4812 flags.go:64] FLAG: --eviction-hard=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604192 4812 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604196 4812 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604201 4812 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604205 4812 flags.go:64] FLAG: --eviction-soft=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604209 4812 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604213 4812 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604217 4812 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604221 4812 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604225 4812 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604229 4812 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604233 4812 flags.go:64] FLAG: --feature-gates=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604238 4812 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604242 4812 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604247 4812 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604252 4812 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604257 4812 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604262 4812 flags.go:64] FLAG: --help="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604267 4812 flags.go:64] FLAG: --hostname-override=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604272 4812 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604277 4812 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604281 4812 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604286 4812 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604291 4812 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604296 4812 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604302 4812 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604307 4812 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604311 4812 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604316 4812 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604321 4812 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604326 4812 flags.go:64] FLAG: --kube-reserved=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604331 4812 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604336 4812 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604341 4812 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604346 4812 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604352 4812 flags.go:64] FLAG: --lock-file=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604357 4812 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604363 4812 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604369 4812 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604377 4812 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604383 4812 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604390 4812 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604396 4812 flags.go:64] FLAG: --logging-format="text"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604401 4812 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604406 4812 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604411 4812 flags.go:64] FLAG: --manifest-url=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604416 4812 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604423 4812 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604428 4812 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604435 4812 flags.go:64] FLAG: --max-pods="110"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604439 4812 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604445 4812 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604450 4812 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604455 4812 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604460 4812 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604465 4812 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604470 4812 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604482 4812 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604487 4812 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604493 4812 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604499 4812 flags.go:64] FLAG: --pod-cidr=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604504 4812 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604512 4812 flags.go:64] FLAG: --pod-manifest-path=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604516 4812 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604522 4812 flags.go:64] FLAG: --pods-per-core="0"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604543 4812 flags.go:64] FLAG: --port="10250"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604548 4812 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604553 4812 flags.go:64] FLAG: --provider-id=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604558 4812 flags.go:64] FLAG: --qos-reserved=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604564 4812 flags.go:64] FLAG: --read-only-port="10255"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604570 4812 flags.go:64] FLAG: --register-node="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604575 4812 flags.go:64] FLAG: --register-schedulable="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604580 4812 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604590 4812 flags.go:64] FLAG: --registry-burst="10"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604595 4812 flags.go:64] FLAG: --registry-qps="5"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604600 4812 flags.go:64] FLAG: --reserved-cpus=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604604 4812 flags.go:64] FLAG: --reserved-memory=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604610 4812 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604615 4812 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604620 4812 flags.go:64] FLAG: --rotate-certificates="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604625 4812 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604629 4812 flags.go:64] FLAG: --runonce="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604634 4812 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604639 4812 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604644 4812 flags.go:64] FLAG: --seccomp-default="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604650 4812 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604654 4812 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604659 4812 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604664 4812 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604670 4812 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604679 4812 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604684 4812 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604688 4812 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604693 4812 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604698 4812 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604703 4812 flags.go:64] FLAG: --system-cgroups=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604708 4812 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604716 4812 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604721 4812 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604725 4812 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604732 4812 flags.go:64] FLAG: --tls-min-version=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604736 4812 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604741 4812 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604746 4812 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604751 4812 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604756 4812 flags.go:64] FLAG: --v="2"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604762 4812 flags.go:64] FLAG: --version="false"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604769 4812 flags.go:64] FLAG: --vmodule=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604775 4812 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.604780 4812 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604914 4812 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604921 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604926 4812 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604932 4812 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604937 4812 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604942 4812 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604947 4812 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604951 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604956 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604961 4812 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604965 4812 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604970 4812 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604978 4812 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604982 4812 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604987 4812 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604991 4812 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.604996 4812 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605000 4812 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605005 4812 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605009 4812 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605013 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605017 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605023 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605028 4812 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605032 4812 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605037 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605041 4812 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605045 4812 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605049 4812 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605053 4812 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605058 4812 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605063 4812 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605069 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605074 4812 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605079 4812 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605084 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605089 4812 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605093 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605098 4812 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605102 4812 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605106 4812 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605111 4812 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605115 4812 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605119 4812 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605125 4812 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605128 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605132 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605136 4812 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605141 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605144 4812 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605148 4812 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605151 4812 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605155 4812 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605158 4812 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605163 4812 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605166 4812 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605170 4812 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605175 4812 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605182 4812 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605189 4812 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605195 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605199 4812 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605205 4812 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605210 4812 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605214 4812 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605218 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605221 4812 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605225 4812 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605228 4812 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605232 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.605236 4812 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.605991 4812 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.612938 4812 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.612973 4812 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613033 4812 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613041 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613046 4812 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613050 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613054 4812 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613058 4812 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613062 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613065 4812 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613069 4812 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613073 4812 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613078 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613082 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613086 4812 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613089 4812 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613093 4812 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613097 4812 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613102 4812 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613106 4812 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613109 4812 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613113 4812 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613116 4812 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613121 4812 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613126 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613129 4812 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613133 4812 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613136 4812 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613140 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613144 4812 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613147 4812 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613151 4812 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613154 4812 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613158 4812 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613162 4812 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613165 4812 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613171 4812 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613176 4812 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613181 4812 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613184 4812 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613188 4812 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613192 4812 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613196 4812 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613200 4812 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613204 4812 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613208 4812 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613213 4812 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613217 4812 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613221 4812 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613225 4812 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613229 4812 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613232 4812 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613236 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613240 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613244 4812 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613247 4812 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613251 4812 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613254 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613258 4812 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613261 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613265 4812 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613268 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613271 4812 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613275 4812 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613279 4812 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613282 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613286 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613289 4812 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613292 4812 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613296 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613299 4812 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613303 4812 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613307 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.613314 4812 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613430 4812 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613436 4812 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613442 4812 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613445 4812 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613449 4812 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613453 4812 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613457 4812 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613461 4812 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613464 4812 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613468 4812 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613471 4812 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613475 4812 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613478 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613482 4812 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613485 4812 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613489 4812 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613493 4812 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613499 4812 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613503 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613507 4812 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613510 4812 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613514 4812 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613520 4812 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613524 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613541 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613545 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613549 4812 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613552 4812 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613556 4812 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613559 4812 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613563 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613566 4812 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613570 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613573 4812 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613578 4812 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613581 4812 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613585 4812 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613588 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613592 4812 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613595 4812 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613599 4812 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613602 4812 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613606 4812 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613609 4812 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613612 4812 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613616 4812 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613619 4812 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613623 4812 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613626 4812 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613629 4812 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613633 4812 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613636 4812 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613639 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613643 4812 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613648 4812 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613652 4812 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613656 4812 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613659 4812 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613663 4812 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613666 4812 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613670 4812 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613673 4812 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613677 4812 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613681 4812 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613686 4812 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613689 4812 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613693 4812 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613697 4812 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613702 4812 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613705 4812 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.613710 4812 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.613716 4812 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.614767 4812 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.619343 4812 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.619425 4812 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.621108 4812 server.go:997] "Starting client certificate rotation"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.621138 4812 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.621970 4812 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-19 10:36:00.077663921 +0000 UTC
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.622110 4812 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 569h48m54.455559661s for next certificate rotation
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.651772 4812 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.654571 4812 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.671548 4812 log.go:25] "Validated CRI v1 runtime API"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.710360 4812 log.go:25] "Validated CRI v1 image API"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.712948 4812 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.723805 4812 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-16-41-58-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.724126 4812 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.751404 4812 manager.go:217] Machine: {Timestamp:2025-11-25 16:47:05.747693227 +0000 UTC m=+0.587835392 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:93542aec-3cae-4037-9cb4-28e49d8b2f68 BootID:42bff2ed-94cf-457c-8bcf-017111af962a Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:cf:9c:5d Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:cf:9c:5d Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:30:3a:dc Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b7:8b:05 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:dc:3d:f4 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:b0:c1:7f Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:88:60:46 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:52:3e:d7:bc:04:e4 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:76:45:81:cb:eb:1f Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.751835 4812 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.752081 4812 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.754720 4812 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.755049 4812 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.755106 4812 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.756959 4812 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.756998 4812 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.757461 4812 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.757493 4812 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.757747 4812 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.758180 4812 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.762964 4812 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.763014 4812 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.763051 4812 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.763075 4812 kubelet.go:324] "Adding apiserver pod source"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.763093 4812 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.769023 4812 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.769790 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.769963 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError"
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.770059 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.770271 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.770795 4812 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.772278 4812 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773598 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773622 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773631 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773638 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773649 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773656 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773663 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773673 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773682 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773689 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773699 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.773705 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.776046 4812 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.776750 4812 server.go:1280] "Started kubelet"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.778061 4812 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:05 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.778392 4812 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.778419 4812 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.778818 4812 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.778844 4812 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.779001 4812 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 05:32:55.856330043 +0000 UTC
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.779029 4812 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 564h45m50.077302058s for next certificate rotation
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.779686 4812 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.779806 4812 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.779896 4812 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.780072 4812 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.780272 4812 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.781329 4812 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.781560 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.781781 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.781664 4812 factory.go:55] Registering systemd factory
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.781811 4812 factory.go:221] Registration of the systemd container factory successfully
Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.782064 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="200ms"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.782112 4812 factory.go:153] Registering CRI-O factory
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.782137 4812 factory.go:221] Registration of the crio container factory successfully
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.782241 4812 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.782272 4812 factory.go:103] Registering Raw factory
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.782295 4812 manager.go:1196] Started watching for new ooms in manager
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.783306 4812 manager.go:319] Starting recovery of all containers
Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.784744 4812 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.188:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b4dcde683decb default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 16:47:05.776717515 +0000 UTC m=+0.616859610,LastTimestamp:2025-11-25 16:47:05.776717515 +0000 UTC m=+0.616859610,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802425 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802516 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802578 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802603 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802646 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802666 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802685 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802714 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802749 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802767 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802786 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802805 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802824 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802848 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802874 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802924 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802947 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802976 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.802997 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803018 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803036 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803056 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803074 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803094 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803113 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803132 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803165 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803186 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803204 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803223 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803241 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803293 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803337 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803357 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803376 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803397 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803418 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803438 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803474 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803495 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803579 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803623 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803647 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803666 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803685 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803703 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803721 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803739 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803759 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803778 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803797 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803816 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803852 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803874 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803896 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803917 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803938 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803958 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803978 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.803997 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804018 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804036 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804055 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804075 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804103 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804136 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804157 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804176 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804193 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804212 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804240 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804269 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804288 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804306 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804324 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804361 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804380 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804408 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804429 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804448 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804467 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804490 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804510 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804569 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804602 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804638 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804666 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804687 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804714 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804733 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804753 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804772 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804791 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804810 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804828 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804845 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804868 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804887 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804915 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804932 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804950 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804968 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.804986 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805010 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805044 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805072 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805093 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805112 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805133 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805153 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805191 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805210 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805230 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805248 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805289 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805309 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805329 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805345 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805365 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805382 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805399 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805416 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805433 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805450 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805469 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805488 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805520 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805587 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805657 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805681 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805741 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805767 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805791 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805812 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805836 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805860 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805882 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805928 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805959 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc"
volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.805984 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806008 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806033 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806057 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806081 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806107 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806132 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806156 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806224 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806249 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806282 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" 
volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806306 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806328 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806350 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806373 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806394 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806415 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806437 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806462 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806497 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806526 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806591 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806640 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806671 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806695 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806717 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806740 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806762 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806804 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806834 4812 manager.go:324] Recovery completed Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.806876 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807503 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807580 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807598 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807614 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807628 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807642 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807655 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807671 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807684 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807698 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807711 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807726 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807744 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807761 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807773 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807799 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807812 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807824 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807838 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807851 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807871 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807891 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807911 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807939 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807957 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807972 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807986 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.807999 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808012 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808026 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808039 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808054 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808076 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808095 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808109 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808123 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" 
volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.808140 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.810267 4812 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.810323 4812 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.810352 4812 reconstruct.go:97] "Volume reconstruction finished" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.810363 4812 reconciler.go:26] "Reconciler: start to sync state" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.818810 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.821185 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.821228 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.821239 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.822003 4812 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.822022 4812 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.822055 4812 state_mem.go:36] "Initialized new in-memory state store" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.827996 4812 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.830191 4812 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.830229 4812 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.830253 4812 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.830291 4812 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 16:47:05 crc kubenswrapper[4812]: W1125 16:47:05.831938 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.832007 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.846917 4812 policy_none.go:49] "None policy: Start" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.848023 4812 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.848048 4812 state_mem.go:35] "Initializing new in-memory state store" Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.882160 4812 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.888512 4812 manager.go:334] "Starting Device Plugin manager" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.888601 4812 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.888612 4812 server.go:79] "Starting device plugin registration server" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.888972 4812 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.888988 4812 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.889433 4812 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.889589 4812 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.889601 4812 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.901500 4812 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.930727 4812 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 16:47:05 crc kubenswrapper[4812]: 
I1125 16:47:05.930839 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.933831 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.934379 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.934391 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.934604 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.934731 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.934781 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935577 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935627 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935757 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935809 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935827 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935837 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935943 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.935976 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936419 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936455 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936468 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936600 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936630 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936643 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936792 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936827 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.936844 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.937446 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.937469 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.937477 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.937862 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.937918 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.937933 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.938094 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.938151 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.938180 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939076 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939113 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939084 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939126 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939144 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939156 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939337 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.939361 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.940335 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.940360 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.940369 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.982895 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="400ms" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.989233 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.990357 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.990386 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.990395 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:05 crc kubenswrapper[4812]: I1125 16:47:05.990471 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 16:47:05 crc kubenswrapper[4812]: E1125 16:47:05.991049 4812 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.188:6443: connect: 
connection refused" node="crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.012998 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013043 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013062 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013077 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013114 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013144 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013170 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013207 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013234 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013252 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013302 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013335 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013350 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.013370 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114421 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114487 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114512 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114561 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114576 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114589 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114603 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114620 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114634 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114701 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114688 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114743 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114759 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114788 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114790 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114754 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114653 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114766 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114865 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114884 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114909 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114928 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114975 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.114991 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.115001 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.115006 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.115037 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.115056 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.115057 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.115061 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.192145 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.193487 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.193517 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.193526 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.193560 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 16:47:06 crc kubenswrapper[4812]: E1125 16:47:06.194018 4812 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.188:6443: 
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.274226 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.287899 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.305722 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.312281 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.316357 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: W1125 16:47:06.327463 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-e6d3b8bf8ae011d1b6fb24f414a4076caf52a0a6679762ec98219743b41b1a94 WatchSource:0}: Error finding container e6d3b8bf8ae011d1b6fb24f414a4076caf52a0a6679762ec98219743b41b1a94: Status 404 returned error can't find the container with id e6d3b8bf8ae011d1b6fb24f414a4076caf52a0a6679762ec98219743b41b1a94
Nov 25 16:47:06 crc kubenswrapper[4812]: W1125 16:47:06.331454 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-01444a6aa3dda4d8b1b03e483c7526c4803e69d784e80f44d2c6d0a5cbbf538c WatchSource:0}: Error finding container 01444a6aa3dda4d8b1b03e483c7526c4803e69d784e80f44d2c6d0a5cbbf538c: Status 404 returned error can't find the container with id 01444a6aa3dda4d8b1b03e483c7526c4803e69d784e80f44d2c6d0a5cbbf538c
Nov 25 16:47:06 crc kubenswrapper[4812]: W1125 16:47:06.333942 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-4d9c23445bd489ee7e4633ce3334da792d47f61efffcbdd64d40cf9d40dc436b WatchSource:0}: Error finding container 4d9c23445bd489ee7e4633ce3334da792d47f61efffcbdd64d40cf9d40dc436b: Status 404 returned error can't find the container with id 4d9c23445bd489ee7e4633ce3334da792d47f61efffcbdd64d40cf9d40dc436b
Nov 25 16:47:06 crc kubenswrapper[4812]: W1125 16:47:06.336494 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-7419f57d10de33542505f9959f59c27831211ba3f56724e38febad41bea0294d WatchSource:0}: Error finding container 7419f57d10de33542505f9959f59c27831211ba3f56724e38febad41bea0294d: Status 404 returned error can't find the container with id 7419f57d10de33542505f9959f59c27831211ba3f56724e38febad41bea0294d
Nov 25 16:47:06 crc kubenswrapper[4812]: E1125 16:47:06.383867 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="800ms"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.595003 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.596875 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.596925 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.596935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.596961 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: E1125 16:47:06.597451 4812 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.188:6443: connect: connection refused" node="crc"
Nov 25 16:47:06 crc kubenswrapper[4812]: W1125 16:47:06.756923 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:06 crc kubenswrapper[4812]: E1125 16:47:06.757017 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError"
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.779138 4812 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.833697 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7419f57d10de33542505f9959f59c27831211ba3f56724e38febad41bea0294d"}
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.834632 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4d9c23445bd489ee7e4633ce3334da792d47f61efffcbdd64d40cf9d40dc436b"}
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.836583 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"01444a6aa3dda4d8b1b03e483c7526c4803e69d784e80f44d2c6d0a5cbbf538c"}
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.837362 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"cb5c00501ffaba3644554fbb3d565d2d7c3bc290cf542ea57b309d30ca3d3f4b"}
Nov 25 16:47:06 crc kubenswrapper[4812]: I1125 16:47:06.838174 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e6d3b8bf8ae011d1b6fb24f414a4076caf52a0a6679762ec98219743b41b1a94"}
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e6d3b8bf8ae011d1b6fb24f414a4076caf52a0a6679762ec98219743b41b1a94"} Nov 25 16:47:07 crc kubenswrapper[4812]: W1125 16:47:07.035009 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused Nov 25 16:47:07 crc kubenswrapper[4812]: E1125 16:47:07.035088 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError" Nov 25 16:47:07 crc kubenswrapper[4812]: W1125 16:47:07.082373 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused Nov 25 16:47:07 crc kubenswrapper[4812]: E1125 16:47:07.082473 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError" Nov 25 16:47:07 crc kubenswrapper[4812]: E1125 16:47:07.184471 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="1.6s" Nov 25 16:47:07 crc kubenswrapper[4812]: W1125 16:47:07.260466 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused Nov 25 16:47:07 crc kubenswrapper[4812]: E1125 16:47:07.260591 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.398551 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.399895 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.399941 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.399953 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.399981 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 16:47:07 crc kubenswrapper[4812]: E1125 
16:47:07.400471 4812 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.188:6443: connect: connection refused" node="crc" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.779378 4812 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.841545 4812 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="0bfc00f90488779a576e3485989a9abc5251762db849ed9f317f7556d2f5d69c" exitCode=0 Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.841613 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"0bfc00f90488779a576e3485989a9abc5251762db849ed9f317f7556d2f5d69c"} Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.841674 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.842570 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.842647 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.842657 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.842727 4812 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f" exitCode=0 Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.842776 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f"} Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.842864 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.843671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.843702 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.843713 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.844873 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.844861 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a"} Nov 25 16:47:07 
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.844958 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8"}
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.844968 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21"}
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.845711 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.845740 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.845753 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.848737 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d" exitCode=0
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.848795 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d"}
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.848847 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.850620 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.850661 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.850678 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.851192 4812 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10" exitCode=0
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.851228 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10"}
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.851303 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.852130 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.852167 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.852179 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.852588 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.853327 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.853359 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:07 crc kubenswrapper[4812]: I1125 16:47:07.853371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.779236 4812 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:08 crc kubenswrapper[4812]: E1125 16:47:08.786142 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="3.2s"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.856637 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.856692 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.856704 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.856716 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.856730 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.856732 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.857835 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.857867 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.857878 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.859768 4812 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8" exitCode=0
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.859797 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.859877 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.860796 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.860824 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.860832 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.861390 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"53487d51602502ad1f7e429507c24ebbffa5a6235663c6c8de0d67240a2c9bee"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.861407 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.862070 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.862092 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.862101 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.867816 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.867849 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.867863 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.867879 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117"}
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.867996 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.868569 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.868618 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.868635 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.868912 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.868937 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.868945 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.982211 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 16:47:08 crc kubenswrapper[4812]: I1125 16:47:08.991082 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.001025 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.002089 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.002124 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.002132 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.002153 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 16:47:09 crc kubenswrapper[4812]: E1125 16:47:09.002501 4812 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.188:6443: connect: connection refused" node="crc"
Nov 25 16:47:09 crc kubenswrapper[4812]: W1125 16:47:09.134470 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused
Nov 25 16:47:09 crc kubenswrapper[4812]: E1125 16:47:09.134568 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError"
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError" Nov 25 16:47:09 crc kubenswrapper[4812]: W1125 16:47:09.246630 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.188:6443: connect: connection refused Nov 25 16:47:09 crc kubenswrapper[4812]: E1125 16:47:09.246730 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.188:6443: connect: connection refused" logger="UnhandledError" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.832343 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871781 4812 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b" exitCode=0 Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871859 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b"} Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871881 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871974 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871980 4812 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.872038 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.872059 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.872038 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871990 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.871984 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.872616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.872638 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.872646 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:09 crc 
kubenswrapper[4812]: I1125 16:47:09.873051 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.873055 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.873099 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.873107 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.873081 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.873132 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.874210 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.874227 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.874235 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.874787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.874809 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:09 crc kubenswrapper[4812]: I1125 16:47:09.874817 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.810739 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878440 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878474 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23"} Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878511 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45"} Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878524 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6"} Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878583 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3"} Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878593 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed"} Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878486 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878449 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.878440 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.879734 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.879763 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.879774 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.879773 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.879810 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.879827 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.880408 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.880419 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.880427 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.880434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.880437 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:10 crc kubenswrapper[4812]: I1125 16:47:10.880442 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.054266 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.609255 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.880852 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.880929 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.880852 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.881689 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.881711 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.881719 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.881951 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.881982 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.881993 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.882061 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.882089 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:11 crc kubenswrapper[4812]: I1125 16:47:11.882099 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.203460 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.204604 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.204638 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.204649 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.204674 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.216278 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.379997 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.883699 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.883761 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.883772 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885058 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885148 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885161 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885216 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885248 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885260 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885225 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:12 crc kubenswrapper[4812]: I1125 16:47:12.885316 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:14 crc kubenswrapper[4812]: I1125 16:47:14.610494 4812 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 16:47:14 crc kubenswrapper[4812]: I1125 16:47:14.610677 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:47:15 crc kubenswrapper[4812]: E1125 16:47:15.901617 4812 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 16:47:16 crc kubenswrapper[4812]: I1125 16:47:16.393444 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:16 crc kubenswrapper[4812]: I1125 16:47:16.393616 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:16 crc kubenswrapper[4812]: I1125 16:47:16.394650 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:16 crc kubenswrapper[4812]: I1125 16:47:16.394672 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:16 crc kubenswrapper[4812]: I1125 16:47:16.394681 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:19 crc kubenswrapper[4812]: W1125 16:47:19.417330 4812 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.417443 4812 trace.go:236] Trace[589405133]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 16:47:09.415) (total time: 10001ms): Nov 25 16:47:19 crc kubenswrapper[4812]: Trace[589405133]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:47:19.417) Nov 25 16:47:19 crc kubenswrapper[4812]: Trace[589405133]: [10.001437042s] [10.001437042s] END Nov 25 16:47:19 crc kubenswrapper[4812]: E1125 16:47:19.417471 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.780614 4812 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.810985 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.811435 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.812717 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.812755 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.812763 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.832377 4812 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 16:47:19 crc kubenswrapper[4812]: I1125 16:47:19.832436 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 16:47:20 crc kubenswrapper[4812]: W1125 16:47:20.224673 4812 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 25 16:47:20 crc kubenswrapper[4812]: I1125 16:47:20.224777 4812 trace.go:236] Trace[2119965022]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 16:47:10.223) (total time: 
Nov 25 16:47:20 crc kubenswrapper[4812]: Trace[2119965022]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:47:20.224)
Nov 25 16:47:20 crc kubenswrapper[4812]: Trace[2119965022]: [10.001604347s] [10.001604347s] END
Nov 25 16:47:20 crc kubenswrapper[4812]: E1125 16:47:20.224806 4812 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 25 16:47:20 crc kubenswrapper[4812]: I1125 16:47:20.685713 4812 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 16:47:20 crc kubenswrapper[4812]: I1125 16:47:20.685812 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 16:47:23 crc kubenswrapper[4812]: I1125 16:47:23.671361 4812 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 16:47:23 crc kubenswrapper[4812]: I1125 16:47:23.837298 4812 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.610178 4812 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.610344 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.841197 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.841762 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.844898 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.844970 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.844982 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.848616 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.916648 4812 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.916706 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.917397 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.917434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:24 crc kubenswrapper[4812]: I1125 16:47:24.917444 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.685092 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.687438 4812 trace.go:236] Trace[1706709932]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 16:47:14.848) (total time: 10838ms):
Nov 25 16:47:25 crc kubenswrapper[4812]: Trace[1706709932]: ---"Objects listed" error: 10838ms (16:47:25.687)
Nov 25 16:47:25 crc kubenswrapper[4812]: Trace[1706709932]: [10.838790352s] [10.838790352s] END
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.687747 4812 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.688173 4812 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.688321 4812 trace.go:236] Trace[668227152]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 16:47:15.459) (total time: 10229ms):
Nov 25 16:47:25 crc kubenswrapper[4812]: Trace[668227152]: ---"Objects listed" error: 10229ms (16:47:25.688)
Nov 25 16:47:25 crc kubenswrapper[4812]: Trace[668227152]: [10.229236323s] [10.229236323s] END
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.688340 4812 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.688704 4812 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.713432 4812 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37596->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.713443 4812 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37606->192.168.126.11:17697: read: connection reset by peer" start-of-body=
status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37606->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.713489 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37596->192.168.126.11:17697: read: connection reset by peer" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.713526 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37606->192.168.126.11:17697: read: connection reset by peer" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.713835 4812 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.713870 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.774071 4812 apiserver.go:52] "Watching apiserver" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.777747 4812 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.778074 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-dgtfm"] Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.778631 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.778720 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.778868 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.778912 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.778993 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.779041 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.778996 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.779692 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.779729 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.779698 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-dgtfm"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.780642 4812 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.783740 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.783830 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.785054 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.785273 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.785391 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.785407 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.786014 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.786062 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.786183 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.786467 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.788284 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.788999 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789033 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789060 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789086 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789111 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789131 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789152 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789174 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789193 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789212 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789233 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789256 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789276 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789296 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789318 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789338 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789358 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789388 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789410 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789431 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789453 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789474 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789494 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789515 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789552 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789575 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789598 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789619 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789640 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789663 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789684 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789705 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789724 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789766 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789833 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789854 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789874 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789896 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789951 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789972 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.789996 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790017 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790125 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790148 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790168 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790189 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790210 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790230 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790251 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790273 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790293 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790315 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790335 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790357 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790402 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790429 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790453 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790474 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790495 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.793031 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.793553 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.793763 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.794000 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.795025 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.795399 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.795627 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.795845 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.796704 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797257 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797431 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797456 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797545 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797617 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.790517 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797667 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797676 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797699 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797722 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797742 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797759 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797789 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797808 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797834 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797854 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797874 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797894 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797914 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.797985 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798002 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798010 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798025 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798067 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798091 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798118 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798143 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798149 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798164 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798185 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798209 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798228 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798246 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798266 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798287 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798305 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798324 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798343 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798367 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798386 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798404 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798425 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798441 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798459 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798478 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798504 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798525 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798560 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798576 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798594 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798611 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798628 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798644 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798663 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798682 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798700 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798721 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798740 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798757 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798779 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798798 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798816 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798832 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798852 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798870 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798873 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798887 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798908 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798922 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.798927 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799119 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799165 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799201 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799223 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799232 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799395 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799429 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799455 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799482 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799510 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799558 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799589 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799620 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799650 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799675 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799707 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799736 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799761 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799790 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799819 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799847 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799876 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799907 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799935 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799963 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799990 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800019 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800045 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800074 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800104 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800134 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800171 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800202 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800236 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: 
\"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800262 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799250 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799505 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799566 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799677 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799816 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.799843 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800080 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800470 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800175 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800176 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800314 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800657 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800873 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.801225 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.802625 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.802993 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803047 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803073 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803094 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803122 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803151 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803176 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803205 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803245 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803265 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: 
\"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803284 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803305 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803323 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803344 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803573 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803639 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803670 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803693 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803714 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803734 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod 
\"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803754 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803776 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803803 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803864 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803890 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803918 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803946 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803966 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.803992 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804018 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804046 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804076 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804100 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804127 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804154 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804183 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804209 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804236 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804256 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804273 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804295 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804314 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804331 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804371 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804395 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804452 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804477 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804504 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804637 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod 
\"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804665 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804696 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804725 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804751 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804759 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804790 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804819 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804846 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804874 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804908 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/889fccf3-a82f-469b-97d3-094dc96045d4-hosts-file\") pod \"node-resolver-dgtfm\" (UID: \"889fccf3-a82f-469b-97d3-094dc96045d4\") " pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804938 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804967 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804995 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805043 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805221 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805341 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805461 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805524 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805708 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.805962 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806016 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806032 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806276 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806583 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806612 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806586 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806786 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.806895 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.807226 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.807837 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.808078 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.808389 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.808890 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.809146 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.809940 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.809990 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.810056 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.811554 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.811663 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.812233 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.812442 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.810047 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.810236 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.810748 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.810888 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.810995 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.811026 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.817036 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.817224 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.817274 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.817507 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.817516 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.817940 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.818093 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.818229 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.818414 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.818793 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.818868 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:26.311164469 +0000 UTC m=+21.151306554 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.818908 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:26.318892955 +0000 UTC m=+21.159035050 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.818868 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819050 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819323 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.804996 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4vvd\" (UniqueName: \"kubernetes.io/projected/889fccf3-a82f-469b-97d3-094dc96045d4-kube-api-access-r4vvd\") pod \"node-resolver-dgtfm\" (UID: \"889fccf3-a82f-469b-97d3-094dc96045d4\") " pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819338 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819451 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819465 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819476 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819491 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819496 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819501 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819554 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819566 4812 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819607 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819631 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819660 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819673 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819685 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819696 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819714 4812 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819724 4812 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819726 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819735 4812 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819747 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819761 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819771 4812 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819781 4812 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819794 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819810 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819822 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: 
\"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819832 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819845 4812 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819857 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819871 4812 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819881 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819893 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819902 4812 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819913 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819924 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819934 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819944 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819954 4812 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819966 4812 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819976 4812 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819986 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.819997 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.820008 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.820019 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.820029 4812 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.820039 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.820050 4812 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821573 4812 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821600 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821612 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821622 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821633 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath 
\"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821652 4812 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821662 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821671 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821680 4812 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821689 4812 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821698 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821709 4812 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821719 4812 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821728 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821738 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821748 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821789 4812 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821799 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc 
kubenswrapper[4812]: I1125 16:47:25.821809 4812 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821818 4812 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821828 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821838 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821847 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821857 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821865 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821874 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821883 4812 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821892 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821900 4812 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821909 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821919 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 
crc kubenswrapper[4812]: I1125 16:47:25.821928 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821938 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821947 4812 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821956 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821965 4812 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.821974 4812 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.822211 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.816584 4812 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.823158 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.823621 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.823746 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.823854 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.823874 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824205 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824248 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824327 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824436 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824611 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824707 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824750 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.824926 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825013 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825248 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825271 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825266 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825548 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825582 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.825908 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.826546 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.826632 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.826814 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.826836 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.826816 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.826960 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.827175 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.827474 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.827732 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828123 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828199 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828222 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828434 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828547 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828556 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828712 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828795 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828809 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.828861 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829074 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829192 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829271 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829278 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.829400 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:47:26.329386615 +0000 UTC m=+21.169528710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829469 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829712 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829713 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829954 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.829974 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.830236 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.830696 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.835152 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.835197 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.835687 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.835990 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.836346 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.836348 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.836382 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.836627 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.837067 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.837204 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.837369 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.837507 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.837522 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.837714 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.838061 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.839141 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.839662 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.839896 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.840451 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.840804 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841161 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841102 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841316 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841383 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841380 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841548 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841586 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841801 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.841811 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.842095 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.842112 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.842123 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.842171 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:26.342156347 +0000 UTC m=+21.182298432 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.842269 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.842430 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.842590 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.842717 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.800641 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.842883 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.842987 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843044 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843062 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.843252 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.843263 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.843271 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:25 crc kubenswrapper[4812]: E1125 16:47:25.843298 4812 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:26.343290106 +0000 UTC m=+21.183432201 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843404 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843567 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843670 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843699 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843519 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843825 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.843867 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.844911 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845010 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845017 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845154 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845264 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845441 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845697 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.845716 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.848358 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.848819 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.849693 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.849932 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.850354 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.850748 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.850981 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.851779 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.852428 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.852972 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.853936 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.855034 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.855564 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.855759 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.857130 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.857931 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.859284 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.860037 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.860824 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.862404 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.863501 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 
16:47:25.864023 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.864108 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.865908 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.866581 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.868169 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.868842 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.869233 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.869370 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.870578 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.871234 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.871774 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.872015 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.872830 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.873332 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.874738 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.875362 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.875665 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.876521 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.877373 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.878296 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.878974 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.879643 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.880520 4812 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.880686 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.882282 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 16:47:25 crc 
kubenswrapper[4812]: I1125 16:47:25.883241 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.884039 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.884473 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.886141 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.887195 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.887803 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.888923 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.889847 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.890332 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.891344 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.892376 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.893037 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.893118 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.895137 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.896089 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.897706 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.898830 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.900127 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.900705 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" 
path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.901366 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.901579 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.902132 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.902734 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.903783 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 16:47:25 crc kubenswrapper[4812]: 
I1125 16:47:25.911489 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.919036 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.921174 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407" exitCode=255 Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.921236 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407"} Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922236 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922355 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922419 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/889fccf3-a82f-469b-97d3-094dc96045d4-hosts-file\") pod \"node-resolver-dgtfm\" (UID: \"889fccf3-a82f-469b-97d3-094dc96045d4\") " pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922452 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4vvd\" (UniqueName: \"kubernetes.io/projected/889fccf3-a82f-469b-97d3-094dc96045d4-kube-api-access-r4vvd\") pod \"node-resolver-dgtfm\" (UID: \"889fccf3-a82f-469b-97d3-094dc96045d4\") " pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922494 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: 
\"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922562 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922573 4812 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922582 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922591 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922600 4812 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922612 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922624 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922634 4812 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922646 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922655 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922663 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922672 4812 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922674 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: 
\"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922668 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922681 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922737 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/889fccf3-a82f-469b-97d3-094dc96045d4-hosts-file\") pod \"node-resolver-dgtfm\" (UID: \"889fccf3-a82f-469b-97d3-094dc96045d4\") " pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922797 4812 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922813 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922824 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922833 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922842 4812 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922851 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922860 4812 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922870 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922878 4812 
reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922888 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922896 4812 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922905 4812 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922915 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922923 4812 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922933 4812 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922942 4812 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922950 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922959 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922968 4812 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922976 4812 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922985 4812 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.922993 
4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923009 4812 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923041 4812 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923065 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923078 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923089 4812 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923103 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923120 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923138 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923156 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923169 4812 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923182 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923197 4812 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923216 
4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923232 4812 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923246 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923258 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923272 4812 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923284 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923323 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923355 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923369 4812 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923381 4812 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923392 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923407 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923422 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923599 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923612 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923886 4812 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923899 4812 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923917 4812 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923938 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923951 4812 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923963 4812 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923974 4812 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.923991 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924002 4812 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924014 4812 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924025 4812 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924041 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: 
\"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924055 4812 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924066 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924220 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924237 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924251 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924264 4812 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924281 4812 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924293 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924303 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924313 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924488 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924500 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924511 4812 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924545 4812 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924560 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924573 4812 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924584 4812 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924602 4812 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924614 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924627 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924639 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924656 4812 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924669 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924682 4812 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924693 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 
crc kubenswrapper[4812]: I1125 16:47:25.924711 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924723 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924733 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924749 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924759 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924771 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924783 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924799 4812 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924810 4812 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924822 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924847 4812 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924865 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924877 4812 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.924889 4812 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.936722 4812 scope.go:117] "RemoveContainer" containerID="3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.937856 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.940077 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4vvd\" (UniqueName: \"kubernetes.io/projected/889fccf3-a82f-469b-97d3-094dc96045d4-kube-api-access-r4vvd\") pod \"node-resolver-dgtfm\" (UID: \"889fccf3-a82f-469b-97d3-094dc96045d4\") " pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.944610 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.954646 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.968137 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.979962 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.989114 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:25 crc kubenswrapper[4812]: I1125 16:47:25.996689 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.010596 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state
\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.021392 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.031015 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.049188 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.057086 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.066368 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.077927 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.086044 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.099389 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.107270 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 16:47:26 crc kubenswrapper[4812]: W1125 16:47:26.111239 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-98849c677fe4c9f2d64ac9f85df4edfcc692abd31a35b8157d8cea83e2ba8d49 WatchSource:0}: Error finding container 98849c677fe4c9f2d64ac9f85df4edfcc692abd31a35b8157d8cea83e2ba8d49: Status 404 returned error can't find the container with id 98849c677fe4c9f2d64ac9f85df4edfcc692abd31a35b8157d8cea83e2ba8d49 Nov 25 16:47:26 crc kubenswrapper[4812]: W1125 16:47:26.117009 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-df8fb20457729594d46280489ce48e0b01bde81f7255a16f3deb4b590386c7a1 WatchSource:0}: Error finding container df8fb20457729594d46280489ce48e0b01bde81f7255a16f3deb4b590386c7a1: Status 404 returned error can't find the container with id df8fb20457729594d46280489ce48e0b01bde81f7255a16f3deb4b590386c7a1 Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.126602 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 16:47:26 crc kubenswrapper[4812]: W1125 16:47:26.140836 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-0fdab0ad95afd1b9be1b486c2c3149b153364e6c2a1d7464201160a50d769882 WatchSource:0}: Error finding container 0fdab0ad95afd1b9be1b486c2c3149b153364e6c2a1d7464201160a50d769882: Status 404 returned error can't find the container with id 0fdab0ad95afd1b9be1b486c2c3149b153364e6c2a1d7464201160a50d769882 Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.144246 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-dgtfm" Nov 25 16:47:26 crc kubenswrapper[4812]: W1125 16:47:26.170047 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod889fccf3_a82f_469b_97d3_094dc96045d4.slice/crio-8e5280b0b9d545fc3d2a8e569ef3f52be6d214607178877b83d5828e31ccfd8c WatchSource:0}: Error finding container 8e5280b0b9d545fc3d2a8e569ef3f52be6d214607178877b83d5828e31ccfd8c: Status 404 returned error can't find the container with id 8e5280b0b9d545fc3d2a8e569ef3f52be6d214607178877b83d5828e31ccfd8c Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.327728 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.327772 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.327860 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.327897 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.327917 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:27.327904461 +0000 UTC m=+22.168046556 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.327951 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:27.327935391 +0000 UTC m=+22.168077476 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.428197 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.428292 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.428322 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428412 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:47:27.428355711 +0000 UTC m=+22.268497806 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428430 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428446 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428456 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428504 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:27.428490464 +0000 UTC m=+22.268632559 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428504 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428551 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428567 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:26 crc kubenswrapper[4812]: E1125 16:47:26.428625 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:27.428607567 +0000 UTC m=+22.268749662 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.925354 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dgtfm" event={"ID":"889fccf3-a82f-469b-97d3-094dc96045d4","Type":"ContainerStarted","Data":"f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.925421 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dgtfm" event={"ID":"889fccf3-a82f-469b-97d3-094dc96045d4","Type":"ContainerStarted","Data":"8e5280b0b9d545fc3d2a8e569ef3f52be6d214607178877b83d5828e31ccfd8c"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.927743 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.927772 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.927785 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0fdab0ad95afd1b9be1b486c2c3149b153364e6c2a1d7464201160a50d769882"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.929548 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"df8fb20457729594d46280489ce48e0b01bde81f7255a16f3deb4b590386c7a1"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.930809 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.930833 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"98849c677fe4c9f2d64ac9f85df4edfcc692abd31a35b8157d8cea83e2ba8d49"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.932709 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.934516 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8"} Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.935018 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.945589 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.961881 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.974429 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:26 crc kubenswrapper[4812]: I1125 16:47:26.986081 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.002823 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.022360 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.044601 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.060505 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.073573 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.094825 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.116467 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.147215 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.164363 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.177447 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.189856 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.201712 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.336368 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.336429 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.336523 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.336525 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.336597 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:29.33658512 +0000 UTC m=+24.176727215 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.336648 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:29.336619971 +0000 UTC m=+24.176762246 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.437202 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.437269 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.437313 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437437 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437455 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437468 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437487 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:47:29.437439771 +0000 UTC m=+24.277581866 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437552 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-25 16:47:29.437542834 +0000 UTC m=+24.277684929 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437646 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437702 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437722 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.437818 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:29.437792411 +0000 UTC m=+24.277934506 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.831152 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.831191 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.831205 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.831298 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.831406 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:27 crc kubenswrapper[4812]: E1125 16:47:27.831465 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.835449 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.836069 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.836948 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-99qrk"] Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.837288 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.839471 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.840588 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.841865 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.842853 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.855889 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.874875 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.886803 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.898771 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.907287 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.915794 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.927499 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.941164 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-serviceca\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.941212 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddnk7\" (UniqueName: \"kubernetes.io/projected/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-kube-api-access-ddnk7\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.941287 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-host\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.945813 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:27 crc kubenswrapper[4812]: I1125 16:47:27.958941 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.042583 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-host\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.042632 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-serviceca\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.042654 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddnk7\" (UniqueName: \"kubernetes.io/projected/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-kube-api-access-ddnk7\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.042728 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host\" (UniqueName: \"kubernetes.io/host-path/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-host\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.043763 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-serviceca\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.065160 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddnk7\" (UniqueName: \"kubernetes.io/projected/6a7a3183-ff61-40f7-aa03-af1e5c4252f1-kube-api-access-ddnk7\") pod \"node-ca-99qrk\" (UID: \"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\") " pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.148698 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-99qrk" Nov 25 16:47:28 crc kubenswrapper[4812]: W1125 16:47:28.160185 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a7a3183_ff61_40f7_aa03_af1e5c4252f1.slice/crio-2a178f79f5cdf85b2047f45de9fe2f650b810c38e8321ea5ce21b0ae434b8ccb WatchSource:0}: Error finding container 2a178f79f5cdf85b2047f45de9fe2f650b810c38e8321ea5ce21b0ae434b8ccb: Status 404 returned error can't find the container with id 2a178f79f5cdf85b2047f45de9fe2f650b810c38e8321ea5ce21b0ae434b8ccb Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.651737 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-lcgpx"] Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.652135 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.652273 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hwqsk"] Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.652950 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.654879 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gljt8"] Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.655247 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.655405 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.655926 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-m7ndd"] Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.655948 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656011 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656044 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656063 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656151 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656303 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656745 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656763 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.656856 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.657838 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.657904 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.658257 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.659445 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.660299 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.660315 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.660221 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.660800 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.660315 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 16:47:28 crc 
kubenswrapper[4812]: I1125 16:47:28.661198 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.667748 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.680790 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.692228 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.705782 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.716356 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.726942 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.740958 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749497 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749769 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-ovn-kubernetes\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749807 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-bin\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749830 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-hostroot\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749853 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmtpr\" (UniqueName: \"kubernetes.io/projected/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-kube-api-access-gmtpr\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749872 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-kubelet\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749891 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-kubelet\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749910 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-netd\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749924 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhsb8\" (UniqueName: \"kubernetes.io/projected/8ed911cf-2139-4b12-84ba-af635585ba29-kube-api-access-hhsb8\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749938 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-netns\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749956 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-var-lib-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.749984 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-ovn\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750037 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-script-lib\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750081 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-multus-certs\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750111 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-os-release\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750143 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmtjx\" (UniqueName: \"kubernetes.io/projected/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-kube-api-access-gmtjx\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750171 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-k8s-cni-cncf-io\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750202 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-cni-bin\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " 
pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750226 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-systemd-units\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750246 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750273 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8ed911cf-2139-4b12-84ba-af635585ba29-proxy-tls\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750298 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cni-binary-copy\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750337 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750373 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750398 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-cnibin\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750416 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chd8d\" (UniqueName: \"kubernetes.io/projected/3a156756-3629-4bed-8de0-1019226b7f04-kube-api-access-chd8d\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750437 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/3a156756-3629-4bed-8de0-1019226b7f04-cni-binary-copy\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750560 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-slash\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750584 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-env-overrides\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750607 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-socket-dir-parent\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750627 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8ed911cf-2139-4b12-84ba-af635585ba29-rootfs\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750648 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-system-cni-dir\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750662 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8ed911cf-2139-4b12-84ba-af635585ba29-mcd-auth-proxy-config\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750680 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-node-log\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750710 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-netns\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750735 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-etc-kubernetes\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750756 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/3a156756-3629-4bed-8de0-1019226b7f04-multus-daemon-config\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750771 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cnibin\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750790 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-system-cni-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750815 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-cni-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750835 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-os-release\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750851 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-conf-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750872 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-cni-multus\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750901 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-etc-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750923 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750940 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-systemd\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.750981 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovn-node-metrics-cert\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.751004 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-log-socket\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.751023 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-config\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.758520 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.772245 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.790348 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.803193 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.813291 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.822714 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.834164 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.846426 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851355 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-conf-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851383 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cnibin\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851399 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-system-cni-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851413 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-cni-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851432 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-os-release\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851447 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-etc-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851461 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-cni-multus\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851476 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851491 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-systemd\") 
pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851504 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-log-socket\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851518 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-config\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851520 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-conf-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851520 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cnibin\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851549 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovn-node-metrics-cert\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851618 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-etc-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851638 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-ovn-kubernetes\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851660 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-ovn-kubernetes\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851669 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-bin\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851690 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-hostroot\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851690 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-system-cni-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851708 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-cni-dir\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851713 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-kubelet\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851700 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-log-socket\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851736 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-kubelet\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851766 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-hostroot\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851771 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-systemd\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851785 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-bin\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851820 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-cni-multus\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851838 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-netd\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851893 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmtpr\" (UniqueName: \"kubernetes.io/projected/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-kube-api-access-gmtpr\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851946 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-netd\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851955 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-kubelet\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.851981 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-multus-certs\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852001 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-os-release\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852022 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmtjx\" (UniqueName: \"kubernetes.io/projected/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-kube-api-access-gmtjx\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852025 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-kubelet\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852041 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: 
\"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-multus-certs\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852045 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhsb8\" (UniqueName: \"kubernetes.io/projected/8ed911cf-2139-4b12-84ba-af635585ba29-kube-api-access-hhsb8\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852100 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-netns\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852125 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-var-lib-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852152 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-ovn\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852175 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-script-lib\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852195 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-systemd-units\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852198 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-netns\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852206 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-var-lib-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852221 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852262 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-ovn\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852280 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-os-release\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852269 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852307 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-openvswitch\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852427 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-config\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852326 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-systemd-units\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852398 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-k8s-cni-cncf-io\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852316 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-k8s-cni-cncf-io\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852495 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-cni-bin\") pod 
\"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852518 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8ed911cf-2139-4b12-84ba-af635585ba29-proxy-tls\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852568 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chd8d\" (UniqueName: \"kubernetes.io/projected/3a156756-3629-4bed-8de0-1019226b7f04-kube-api-access-chd8d\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852578 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-var-lib-cni-bin\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852595 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cni-binary-copy\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852615 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852632 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852651 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-cnibin\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852676 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3a156756-3629-4bed-8de0-1019226b7f04-cni-binary-copy\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852700 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-env-overrides\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852728 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-slash\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852743 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8ed911cf-2139-4b12-84ba-af635585ba29-rootfs\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852788 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-socket-dir-parent\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852802 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-node-log\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852821 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-system-cni-dir\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852837 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8ed911cf-2139-4b12-84ba-af635585ba29-mcd-auth-proxy-config\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852853 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-netns\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852871 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/3a156756-3629-4bed-8de0-1019226b7f04-multus-daemon-config\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852878 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-script-lib\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 
crc kubenswrapper[4812]: I1125 16:47:28.852914 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852920 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-etc-kubernetes\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852887 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-etc-kubernetes\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852952 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-cnibin\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.852988 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-node-log\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853429 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-env-overrides\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853470 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-slash\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853496 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-cni-binary-copy\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853498 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8ed911cf-2139-4b12-84ba-af635585ba29-rootfs\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853555 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-multus-socket-dir-parent\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3a156756-3629-4bed-8de0-1019226b7f04-cni-binary-copy\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853583 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853615 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-system-cni-dir\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853615 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-host-run-netns\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853643 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8ed911cf-2139-4b12-84ba-af635585ba29-mcd-auth-proxy-config\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.853958 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/3a156756-3629-4bed-8de0-1019226b7f04-multus-daemon-config\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.854031 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3a156756-3629-4bed-8de0-1019226b7f04-os-release\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.855481 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovn-node-metrics-cert\") pod \"ovnkube-node-hwqsk\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.855733 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8ed911cf-2139-4b12-84ba-af635585ba29-proxy-tls\") pod \"machine-config-daemon-lcgpx\" (UID: 
\"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.858891 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.866842 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chd8d\" (UniqueName: \"kubernetes.io/projected/3a156756-3629-4bed-8de0-1019226b7f04-kube-api-access-chd8d\") pod \"multus-m7ndd\" (UID: \"3a156756-3629-4bed-8de0-1019226b7f04\") " pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.868202 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmtjx\" (UniqueName: \"kubernetes.io/projected/eaeac0de-94b4-43d0-b72f-3a70c6d348c6-kube-api-access-gmtjx\") pod \"multus-additional-cni-plugins-gljt8\" (UID: \"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\") " pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.869356 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmtpr\" (UniqueName: \"kubernetes.io/projected/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-kube-api-access-gmtpr\") pod \"ovnkube-node-hwqsk\" (UID: 
\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.871186 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhsb8\" (UniqueName: \"kubernetes.io/projected/8ed911cf-2139-4b12-84ba-af635585ba29-kube-api-access-hhsb8\") pod \"machine-config-daemon-lcgpx\" (UID: \"8ed911cf-2139-4b12-84ba-af635585ba29\") " pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.871345 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.881606 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.889524 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.898000 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.915358 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.932559 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"en
v-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.940765 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-99qrk" event={"ID":"6a7a3183-ff61-40f7-aa03-af1e5c4252f1","Type":"ContainerStarted","Data":"67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c"} Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.940816 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-99qrk" event={"ID":"6a7a3183-ff61-40f7-aa03-af1e5c4252f1","Type":"ContainerStarted","Data":"2a178f79f5cdf85b2047f45de9fe2f650b810c38e8321ea5ce21b0ae434b8ccb"} Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.943873 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca"} Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.958088 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.968191 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.970338 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-a
piserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.975068 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:28 crc kubenswrapper[4812]: W1125 16:47:28.978028 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ed911cf_2139_4b12_84ba_af635585ba29.slice/crio-416580a8e9c578d62e57132747c151c3b50f234f2018fef5b54a8319e894cf2f WatchSource:0}: Error finding container 416580a8e9c578d62e57132747c151c3b50f234f2018fef5b54a8319e894cf2f: Status 404 returned error can't find the container with id 416580a8e9c578d62e57132747c151c3b50f234f2018fef5b54a8319e894cf2f Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.982043 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gljt8" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.982235 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:28 crc kubenswrapper[4812]: W1125 16:47:28.985378 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc4dc9ff_11a1_4151_91f0_3ff83020b3b9.slice/crio-c5dd744dc9a8b03cba08022a22ca248c94ef83344b70f817ed416332dbf1a2aa WatchSource:0}: Error finding container c5dd744dc9a8b03cba08022a22ca248c94ef83344b70f817ed416332dbf1a2aa: Status 404 returned error can't find the container with id c5dd744dc9a8b03cba08022a22ca248c94ef83344b70f817ed416332dbf1a2aa Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.986867 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-m7ndd" Nov 25 16:47:28 crc kubenswrapper[4812]: I1125 16:47:28.997636 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:28Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: W1125 16:47:29.000109 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeaeac0de_94b4_43d0_b72f_3a70c6d348c6.slice/crio-5b97bc3fc5f3268b7c65c1d389c3e5d6a691baa58d52db421ed99688e595581f WatchSource:0}: Error finding container 5b97bc3fc5f3268b7c65c1d389c3e5d6a691baa58d52db421ed99688e595581f: Status 404 returned error can't find the container with id 5b97bc3fc5f3268b7c65c1d389c3e5d6a691baa58d52db421ed99688e595581f Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.015868 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.030626 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.043833 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.056334 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.068711 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.078319 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.087818 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.106745 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.120130 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.132784 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.144411 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.155749 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.166137 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.176460 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.188525 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.206344 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volume
Mounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.219203 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.231364 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.244035 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.252689 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.263388 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.274657 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.358491 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:29 
crc kubenswrapper[4812]: I1125 16:47:29.358572 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.358656 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.358717 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:33.358699717 +0000 UTC m=+28.198841812 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.358843 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.358978 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:33.358951213 +0000 UTC m=+28.199093348 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.459728 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.459847 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.459897 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460027 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460046 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460057 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460139 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:47:33.460058292 +0000 UTC m=+28.300200397 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460183 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460200 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:33.460187805 +0000 UTC m=+28.300330100 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460210 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460226 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.460273 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:33.460252847 +0000 UTC m=+28.300394952 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.830652 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.830709 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.830732 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.830775 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.830828 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:29 crc kubenswrapper[4812]: E1125 16:47:29.830893 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.834891 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.846879 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.846896 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.847513 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.860647 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.871824 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.884889 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.896763 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.907689 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.918327 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.926689 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.934438 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.946612 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerStarted","Data":"720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.946660 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerStarted","Data":"1fad40e1651de07e8e86be919942984ed19dfe0f3dcfdc44dfdcb4fa37b481dc"} Nov 25 
16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.947961 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740" exitCode=0 Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.948014 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.948092 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"c5dd744dc9a8b03cba08022a22ca248c94ef83344b70f817ed416332dbf1a2aa"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.949358 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.949396 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.949409 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"416580a8e9c578d62e57132747c151c3b50f234f2018fef5b54a8319e894cf2f"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.950605 4812 generic.go:334] "Generic (PLEG): container finished" podID="eaeac0de-94b4-43d0-b72f-3a70c6d348c6" containerID="571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19" exitCode=0 Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.950687 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerDied","Data":"571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.950723 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerStarted","Data":"5b97bc3fc5f3268b7c65c1d389c3e5d6a691baa58d52db421ed99688e595581f"} Nov 25 16:47:29 crc kubenswrapper[4812]: I1125 16:47:29.953643 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:29Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.004491 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.046692 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.059901 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.072959 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.090855 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.103704 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.117401 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.126946 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.141678 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.153306 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\
\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.165496 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.175022 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.185687 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.194936 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.213825 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.227358 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.240634 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.964714 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.965137 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.965148 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.966390 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerStarted","Data":"b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb"} Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.977945 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.986618 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:30 crc kubenswrapper[4812]: I1125 16:47:30.995612 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:30Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.015467 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.027582 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.037161 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.047344 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.071459 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.085905 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.097891 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.109824 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.124460 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\
":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.147244 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.161843 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.613880 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.618702 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.623216 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.626672 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.635455 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.652974 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"nam
e\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-d
ev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.666404 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.679328 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.691323 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.703545 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.721060 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.733380 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.743574 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.758032 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\
":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.770610 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.782176 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.794147 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.804052 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.814314 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.825013 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.831101 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.831137 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:31 crc kubenswrapper[4812]: E1125 16:47:31.831244 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.831374 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:31 crc kubenswrapper[4812]: E1125 16:47:31.831560 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:31 crc kubenswrapper[4812]: E1125 16:47:31.831404 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.835242 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.858379 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.881276 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.899832 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.911205 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.919945 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.927690 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.935743 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.951796 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.965740 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.973864 4812 generic.go:334] "Generic (PLEG): container finished" podID="eaeac0de-94b4-43d0-b72f-3a70c6d348c6" containerID="b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb" exitCode=0 Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.973920 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerDied","Data":"b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb"} Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.977668 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.977794 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.977878 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.985556 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:31 crc kubenswrapper[4812]: I1125 16:47:31.999051 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:31Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.016301 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.029446 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.041618 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.082374 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.088349 4812 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.090270 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.090301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.090309 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.090395 4812 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.141914 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.155852 4812 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.156157 4812 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.157489 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.157559 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.157574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.157591 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.157603 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: E1125 16:47:32.176702 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.180353 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.180726 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.180739 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.180755 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.180768 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: E1125 16:47:32.192619 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.195566 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.195593 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.195604 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.195618 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.195652 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.201418 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: E1125 16:47:32.206968 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.210449 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.210481 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.210490 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.210507 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.210516 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: E1125 16:47:32.221771 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.224875 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.224902 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.224912 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.224927 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.224935 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: E1125 16:47:32.235273 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: E1125 16:47:32.235416 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.237175 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.237209 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.237220 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.237236 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.237248 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.244748 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.283045 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.323148 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.339277 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.339325 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc 
kubenswrapper[4812]: I1125 16:47:32.339335 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.339352 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.339365 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.362299 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.400798 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.441968 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.442003 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.441981 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.442318 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.442341 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.442363 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.480991 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.527692 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.545616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.545663 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.545674 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.545690 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.545702 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.562066 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.648831 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 
16:47:32.648877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.648887 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.648904 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.648913 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.751339 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.751381 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.751410 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.751422 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.751433 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.853896 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.853939 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.853947 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.853965 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.853977 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.956520 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.956587 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.956600 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.956620 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.956632 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:32Z","lastTransitionTime":"2025-11-25T16:47:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.983626 4812 generic.go:334] "Generic (PLEG): container finished" podID="eaeac0de-94b4-43d0-b72f-3a70c6d348c6" containerID="5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53" exitCode=0 Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.983678 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerDied","Data":"5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53"} Nov 25 16:47:32 crc kubenswrapper[4812]: I1125 16:47:32.998198 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:32Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.011824 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.022324 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.032439 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.043923 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.059085 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.059126 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.059143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.059157 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.059169 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.060292 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa
41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.073017 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.087055 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.104290 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.114836 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.126788 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.137047 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.150398 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.161766 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.161810 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.161820 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.161834 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.161842 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.162960 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.174328 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:33Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.263723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.263753 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.263760 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.263772 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.263781 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.366019 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.366246 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.366311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.366382 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.366443 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.398624 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.398693 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.398776 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.398799 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.398834 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:41.398819741 +0000 UTC m=+36.238961846 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.398851 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:41.398843601 +0000 UTC m=+36.238985696 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.468591 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.468622 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.468633 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.468649 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.468662 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.499240 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.499477 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:47:41.499447766 +0000 UTC m=+36.339589861 (durationBeforeRetry 8s). 
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.500069 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.500132 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500275 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500306 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500305 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500318 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500327 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500335 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500374 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:41.50035783 +0000 UTC m=+36.340499925 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.500391 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:41.500383851 +0000 UTC m=+36.340525946 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.571863 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.572084 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.572176 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.572242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.572334 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.675058 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.675105 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.675120 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.675140 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.675154 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.777212 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.777248 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.777259 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.777275 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.777288 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.831356 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.831466 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.831356 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.831703 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.831821 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:33 crc kubenswrapper[4812]: E1125 16:47:33.831948 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.880145 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.880437 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.880445 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.880458 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.880467 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.982740 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.982818 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.982841 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.982873 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.982894 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:33Z","lastTransitionTime":"2025-11-25T16:47:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"}
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.990368 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"}
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.992406 4812 generic.go:334] "Generic (PLEG): container finished" podID="eaeac0de-94b4-43d0-b72f-3a70c6d348c6" containerID="b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e" exitCode=0
Nov 25 16:47:33 crc kubenswrapper[4812]: I1125 16:47:33.992455 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerDied","Data":"b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e"}
Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.007912 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.019952 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8
s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.031314 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.042465 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z"
Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.053594 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z"
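Each of these status patches fails for the same reason: the serving certificate behind the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-25. A minimal sketch of the x509 validity check that the TLS handshake is failing follows, assuming the certificate is available as a local PEM file (the filename is a placeholder, not a path from this cluster):

```go
// Sketch: parse a PEM certificate and compare NotAfter against the current
// time, reproducing the "certificate has expired" failure mode seen above.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("webhook-serving-cert.pem") // placeholder path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if now := time.Now(); now.After(cert.NotAfter) {
		// e.g. "certificate has expired: current time 2025-11-25T16:47:34Z
		// is after 2025-08-24T17:21:41Z", as in the log entries above.
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
```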
Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.063605 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.072940 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.085159 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.085383 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.085511 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.085634 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.085729 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.090122 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z 
is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.103449 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.114652 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.130289 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.139511 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.150766 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.171240 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.183875 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.188328 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.188490 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.188692 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.188802 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.188897 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.291269 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.291583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.291660 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.291740 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.291797 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.394684 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.394732 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.394744 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.394762 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.394774 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.497444 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.497475 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.497485 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.497500 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.497525 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.599916 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.599950 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.599958 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.599971 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.599980 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.702660 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.702717 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.702733 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.702755 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.702768 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.804333 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.804370 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.804381 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.804397 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.804414 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.906937 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.906981 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.906993 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.907009 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.907021 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:34Z","lastTransitionTime":"2025-11-25T16:47:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.998157 4812 generic.go:334] "Generic (PLEG): container finished" podID="eaeac0de-94b4-43d0-b72f-3a70c6d348c6" containerID="f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5" exitCode=0 Nov 25 16:47:34 crc kubenswrapper[4812]: I1125 16:47:34.998207 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerDied","Data":"f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5"} Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.009634 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.009670 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.009679 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.009693 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.009705 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.012109 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.030055 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.045174 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.056305 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.067453 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.076818 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.090656 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.107955 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.111607 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.111659 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.111671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.111687 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.111698 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.120956 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.133687 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.145788 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.155918 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.167402 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.177845 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.192925 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.213719 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.213756 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.213767 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.213781 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.213791 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.316625 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.316670 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.316684 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.316701 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.316712 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.419183 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.419221 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.419232 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.419246 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.419257 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.521668 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.521705 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.521714 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.521729 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.521738 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.623511 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.623583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.623601 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.623618 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.623629 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.726139 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.726181 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.726195 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.726215 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.726231 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.828778 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.828827 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.828843 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.828864 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.828876 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.831175 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.831197 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:35 crc kubenswrapper[4812]: E1125 16:47:35.831271 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.831284 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:35 crc kubenswrapper[4812]: E1125 16:47:35.831378 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:35 crc kubenswrapper[4812]: E1125 16:47:35.831460 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.851762 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"rest
artCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\"
:{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.866097 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.882507 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.893684 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.905127 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.914572 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.926039 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.930403 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.930442 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.930454 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.930470 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.930482 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:35Z","lastTransitionTime":"2025-11-25T16:47:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.939018 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmt
jx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.960017 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.973501 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.984501 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:35 crc kubenswrapper[4812]: I1125 16:47:35.997266 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.004860 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.005603 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.005623 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.009133 4812 generic.go:334] "Generic (PLEG): container finished" podID="eaeac0de-94b4-43d0-b72f-3a70c6d348c6" containerID="7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68" exitCode=0 Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.009211 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerDied","Data":"7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.010997 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.022368 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.032140 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.032918 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.033974 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.033998 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.034006 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.034017 4812 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.034027 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.037622 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.050068 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCo
unt\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.062040 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.078472 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.089618 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.101835 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.113380 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.125665 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.136764 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.136799 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.136809 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.136825 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.136836 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.137113 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.148003 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.160553 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.170246 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.190580 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.204156 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.224950 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.237362 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.238365 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.238404 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.238413 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.238430 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.238439 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.341484 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.341559 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.341574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.341596 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.341610 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.444074 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.444113 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.444122 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.444137 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.444148 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.547691 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.547780 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.547803 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.547836 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.547860 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.650081 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.650131 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.650142 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.650161 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.650173 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.752445 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.752482 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.752490 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.752505 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.752516 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.854411 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.854458 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.854470 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.854487 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.854499 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.956947 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.957204 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.957218 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.957235 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:36 crc kubenswrapper[4812]: I1125 16:47:36.957246 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:36Z","lastTransitionTime":"2025-11-25T16:47:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.015786 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" event={"ID":"eaeac0de-94b4-43d0-b72f-3a70c6d348c6","Type":"ContainerStarted","Data":"1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.015831 4812 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.034333 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b9
0092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.047178 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.059702 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.059774 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.059789 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.059803 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.059814 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.060993 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.071589 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.082059 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-m
anager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.093986 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.107906 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.117786 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.125651 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.144837 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/me
trics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\
\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.157611 4812 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.161877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.161926 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.161943 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.161961 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.161972 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.171249 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.182856 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.193944 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.204790 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:37Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.264566 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.264610 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.264621 4812 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.264639 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.264651 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.367047 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.367087 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.367096 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.367109 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.367118 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.469899 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.469944 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.469955 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.469973 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.469984 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.573164 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.573234 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.573259 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.573287 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.573306 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.676309 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.676580 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.676594 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.676606 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.676615 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.780503 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.780572 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.780584 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.780601 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.780613 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.831098 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.831179 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:37 crc kubenswrapper[4812]: E1125 16:47:37.831227 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:37 crc kubenswrapper[4812]: E1125 16:47:37.831289 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.831344 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:37 crc kubenswrapper[4812]: E1125 16:47:37.831411 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.882762 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.882810 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.882818 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.882831 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.882840 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.985153 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.985190 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.985200 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.985214 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:37 crc kubenswrapper[4812]: I1125 16:47:37.985225 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:37Z","lastTransitionTime":"2025-11-25T16:47:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.017976 4812 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.088197 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.088260 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.088269 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.088283 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.088292 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.190642 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.190697 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.190709 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.190729 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.190743 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.292946 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.292989 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.292998 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.293012 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.293021 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.395139 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.395184 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.395196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.395213 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.395239 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.497478 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.497555 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.497568 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.497589 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.497602 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.599778 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.599819 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.599831 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.599846 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.599857 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.702131 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.702230 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.702251 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.702277 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.702294 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.804715 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.804796 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.804822 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.804852 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.804871 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.907116 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.907161 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.907178 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.907196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:38 crc kubenswrapper[4812]: I1125 16:47:38.907211 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:38Z","lastTransitionTime":"2025-11-25T16:47:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.010226 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.010299 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.010318 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.010348 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.010369 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.023321 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/0.log" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.026219 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436" exitCode=1 Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.026257 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.026876 4812 scope.go:117] "RemoveContainer" containerID="a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.051656 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"na
me\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.070168 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountP
ath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.085216 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c
4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.097935 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.110380 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.112958 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.113026 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.113039 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.113057 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.113068 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.122933 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.132569 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.150267 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b
839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.164542 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.179288 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.193242 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.208122 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.216521 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.216581 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.216595 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.216616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.216632 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.228150 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.249094 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0def
e4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf275
71755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.266836 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.319143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.319193 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.319207 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.319225 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.319239 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.423102 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.423134 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.423142 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.423155 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.423164 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.525102 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.525143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.525151 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.525168 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.525177 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.627804 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.627866 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.627889 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.627917 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.627931 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.731100 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.731151 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.731164 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.731185 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.731198 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.830998 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.831060 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:39 crc kubenswrapper[4812]: E1125 16:47:39.831134 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.831005 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:39 crc kubenswrapper[4812]: E1125 16:47:39.831270 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:39 crc kubenswrapper[4812]: E1125 16:47:39.831472 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.833508 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.833552 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.833562 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.833575 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.833585 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.935546 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.935630 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.935646 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.935665 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.935679 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:39Z","lastTransitionTime":"2025-11-25T16:47:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.970915 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft"] Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.971640 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.975315 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.975520 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.986960 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:39 crc kubenswrapper[4812]: I1125 16:47:39.997697 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:39Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.011359 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.022673 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib
/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.034633 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/1.log" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.035119 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.035584 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/0.log" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.038737 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.038800 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.038819 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.038850 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.038871 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.040738 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449" exitCode=1 Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.040787 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.040850 4812 scope.go:117] "RemoveContainer" containerID="a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.043062 4812 scope.go:117] "RemoveContainer" containerID="c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449" Nov 25 16:47:40 crc kubenswrapper[4812]: E1125 16:47:40.043278 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.048639 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.058597 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.065806 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd450cbc-02a7-4b95-8c0f-455df0f1f996-env-overrides\") pod 
\"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.065846 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9kb5\" (UniqueName: \"kubernetes.io/projected/cd450cbc-02a7-4b95-8c0f-455df0f1f996-kube-api-access-k9kb5\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.065865 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd450cbc-02a7-4b95-8c0f-455df0f1f996-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.065889 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd450cbc-02a7-4b95-8c0f-455df0f1f996-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.066343 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\
"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.078175 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.093673 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\
":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b
839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.104565 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z"
Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.114500 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z"
Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.125024 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.142010 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.142479 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.142518 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.142551 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.142570 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.142583 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.154160 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.165032 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.167361 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/cd450cbc-02a7-4b95-8c0f-455df0f1f996-env-overrides\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.167426 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9kb5\" (UniqueName: \"kubernetes.io/projected/cd450cbc-02a7-4b95-8c0f-455df0f1f996-kube-api-access-k9kb5\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.167457 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd450cbc-02a7-4b95-8c0f-455df0f1f996-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.167494 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd450cbc-02a7-4b95-8c0f-455df0f1f996-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.169059 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd450cbc-02a7-4b95-8c0f-455df0f1f996-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.169112 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd450cbc-02a7-4b95-8c0f-455df0f1f996-env-overrides\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.176024 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd450cbc-02a7-4b95-8c0f-455df0f1f996-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.178301 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.182729 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9kb5\" (UniqueName: \"kubernetes.io/projected/cd450cbc-02a7-4b95-8c0f-455df0f1f996-kube-api-access-k9kb5\") pod \"ovnkube-control-plane-749d76644c-h2fft\" (UID: \"cd450cbc-02a7-4b95-8c0f-455df0f1f996\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.189330 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z"
Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.200987 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.210519 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.220486 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.235717 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] 
failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-ov
errides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.244575 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.244630 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.244645 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.244667 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.244680 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.248178 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.264466 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.277819 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.284759 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.287647 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: W1125 16:47:40.297013 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd450cbc_02a7_4b95_8c0f_455df0f1f996.slice/crio-5d149e6118b20d0b1d6a361bcf4c0026aee430ae81baa55bd18eb677e1818523 WatchSource:0}: Error finding container 5d149e6118b20d0b1d6a361bcf4c0026aee430ae81baa55bd18eb677e1818523: Status 404 returned error can't find the container with id 5d149e6118b20d0b1d6a361bcf4c0026aee430ae81baa55bd18eb677e1818523 Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.302623 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controlle
r-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.314797 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.326815 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.336729 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.347263 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.347298 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.347307 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.347320 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.347332 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.351830 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.366826 4812 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.449400 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.449432 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.449441 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.449456 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.449466 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.551410 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.551446 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.551457 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.551473 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.551485 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.653186 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.653231 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.653243 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.653257 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.653267 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.755292 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.755331 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.755343 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.755360 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.755374 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.816315 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.836286 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\
\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\
\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.848695 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.858832 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.859213 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc 
kubenswrapper[4812]: I1125 16:47:40.859244 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.859254 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.859271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.859282 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.870949 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.882068 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.892319 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.901517 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.916840 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.928371 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib
/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.939020 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.948899 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.959188 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.961089 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.961139 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.961158 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.961440 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.961485 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:40Z","lastTransitionTime":"2025-11-25T16:47:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.972731 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:40 crc kubenswrapper[4812]: I1125 16:47:40.984663 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:40Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.007676 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] 
failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-ov
errides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.020564 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.045774 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/1.log" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.049079 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" 
event={"ID":"cd450cbc-02a7-4b95-8c0f-455df0f1f996","Type":"ContainerStarted","Data":"8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.049118 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" event={"ID":"cd450cbc-02a7-4b95-8c0f-455df0f1f996","Type":"ContainerStarted","Data":"5d149e6118b20d0b1d6a361bcf4c0026aee430ae81baa55bd18eb677e1818523"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.063476 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.063548 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.063561 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.063579 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.063591 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.166296 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.166340 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.166352 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.166368 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.166377 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.269017 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.269066 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.269080 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.269099 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.269110 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.371264 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.371302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.371311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.371326 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.371337 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.403677 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-82fvc"] Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.404324 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.404415 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.417845 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.428184 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.439592 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.450313 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.461009 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.470547 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.472933 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.472982 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.472995 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.473018 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.473032 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.480898 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.480948 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.481040 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.481058 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.481095 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:57.481082783 +0000 UTC m=+52.321224878 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.481130 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:57.481112203 +0000 UTC m=+52.321254298 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.484227 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a
1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.501699 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.514254 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.528774 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.540717 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.556599 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.566962 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.575320 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.575390 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.575402 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.575419 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.575431 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.580457 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.581792 4812 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.581930 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:47:57.581908884 +0000 UTC m=+52.422051009 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.581975 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.582019 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dqsg\" (UniqueName: \"kubernetes.io/projected/fbb57832-3993-492b-80c9-a6a61891a125-kube-api-access-8dqsg\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.582097 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582148 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582170 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582184 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582276 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-25 16:47:57.582249622 +0000 UTC m=+52.422391787 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.582333 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582412 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582432 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582442 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.582496 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:57.582480549 +0000 UTC m=+52.422622714 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.599816 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.614132 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.625049 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:41Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.677744 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc 
kubenswrapper[4812]: I1125 16:47:41.677775 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.677785 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.677798 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.677807 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.683376 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dqsg\" (UniqueName: \"kubernetes.io/projected/fbb57832-3993-492b-80c9-a6a61891a125-kube-api-access-8dqsg\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.683427 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.683603 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.683672 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:42.183655489 +0000 UTC m=+37.023797584 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.698237 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dqsg\" (UniqueName: \"kubernetes.io/projected/fbb57832-3993-492b-80c9-a6a61891a125-kube-api-access-8dqsg\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.780065 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.780121 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.780138 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.780170 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.780187 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.831017 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.831051 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.831054 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.831169 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.831295 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:41 crc kubenswrapper[4812]: E1125 16:47:41.831451 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.882144 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.882189 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.882198 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.882212 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.882222 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.984912 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.984991 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.985001 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.985031 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:41 crc kubenswrapper[4812]: I1125 16:47:41.985047 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:41Z","lastTransitionTime":"2025-11-25T16:47:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.054543 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" event={"ID":"cd450cbc-02a7-4b95-8c0f-455df0f1f996","Type":"ContainerStarted","Data":"067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.070865 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.087787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.087842 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.087857 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.087886 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.087902 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.096211 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a6731
4731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.115442 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.131063 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 
16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.146673 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.163242 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"
cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"moun
tPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.177611 
4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:
28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.188427 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.188668 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.188751 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:43.188728129 +0000 UTC m=+38.028870224 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.190545 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.190586 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.190601 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.190623 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.190586 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.190642 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.204287 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.221272 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.235291 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.246206 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.257282 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.278905 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 
model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.291566 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.292599 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.292647 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.292662 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.292680 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.292693 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.304403 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.316515 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.333971 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.334006 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.334015 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.334027 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.334037 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.350971 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 
2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.355333 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.355371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.355382 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.355399 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.355414 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.366257 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 
2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.369779 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.369812 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.369821 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.369835 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.369846 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.379785 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 
2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.382787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.382822 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.382833 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.382852 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.382862 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.394885 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 
2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.397890 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.397922 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.397931 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.397945 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.397954 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.408588 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:42Z is after 
2025-08-24T17:21:41Z" Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.408695 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.410271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.410301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.410313 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.410329 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.410342 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.513041 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.513097 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.513105 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.513119 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.513128 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.615827 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.615865 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.615875 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.615892 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.615901 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.718355 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.718396 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.718403 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.718420 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.718431 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.820291 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.820341 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.820349 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.820380 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.820389 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.830871 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:42 crc kubenswrapper[4812]: E1125 16:47:42.830991 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.923370 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.923406 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.923414 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.923431 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:42 crc kubenswrapper[4812]: I1125 16:47:42.923441 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:42Z","lastTransitionTime":"2025-11-25T16:47:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.025844 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.025885 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.025893 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.025910 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.025922 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.128964 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.128999 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.129007 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.129019 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.129028 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.198076 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:43 crc kubenswrapper[4812]: E1125 16:47:43.198273 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:43 crc kubenswrapper[4812]: E1125 16:47:43.198371 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:45.198340414 +0000 UTC m=+40.038482549 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.232149 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.232190 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.232199 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.232216 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.232225 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.334910 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.334969 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.334984 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.335005 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.335021 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.436966 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.436995 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.437003 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.437015 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.437024 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.539784 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.539822 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.539831 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.539845 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.539854 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.643063 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.643125 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.643153 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.643181 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.643202 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.746724 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.746760 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.746770 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.746784 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.746794 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.830506 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.830593 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.830680 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:43 crc kubenswrapper[4812]: E1125 16:47:43.830808 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:43 crc kubenswrapper[4812]: E1125 16:47:43.830919 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:43 crc kubenswrapper[4812]: E1125 16:47:43.831035 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.849160 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.849223 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.849237 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.849278 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.849292 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.952045 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.952085 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.952094 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.952108 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:43 crc kubenswrapper[4812]: I1125 16:47:43.952118 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:43Z","lastTransitionTime":"2025-11-25T16:47:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.054280 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.054345 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.054362 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.054385 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.054400 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:44Z","lastTransitionTime":"2025-11-25T16:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.157254 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.157298 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.157311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.157327 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.157340 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:44Z","lastTransitionTime":"2025-11-25T16:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.259774 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.259802 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.259810 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.259824 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.259832 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:44Z","lastTransitionTime":"2025-11-25T16:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.362479 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.362569 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.362588 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.362613 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.362629 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:44Z","lastTransitionTime":"2025-11-25T16:47:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:44 crc kubenswrapper[4812]: I1125 16:47:44.831036 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:47:44 crc kubenswrapper[4812]: E1125 16:47:44.831183 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.221436 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:47:45 crc kubenswrapper[4812]: E1125 16:47:45.221619 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 16:47:45 crc kubenswrapper[4812]: E1125 16:47:45.221693 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:49.221676212 +0000 UTC m=+44.061818317 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.830687 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.830748 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.830687 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:45 crc kubenswrapper[4812]: E1125 16:47:45.830836 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:47:45 crc kubenswrapper[4812]: E1125 16:47:45.830996 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:47:45 crc kubenswrapper[4812]: E1125 16:47:45.831104 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.843374 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.855931 4812 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.877344 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-2
5T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b03750
9a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.889058 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.902761 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.904576 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.904618 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.904627 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.904640 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.904663 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:45Z","lastTransitionTime":"2025-11-25T16:47:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.926715 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.954038 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.973409 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:45 crc kubenswrapper[4812]: I1125 16:47:45.989303 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:45Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.002322 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc 
kubenswrapper[4812]: I1125 16:47:46.006191 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.006216 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.006223 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.006235 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.006243 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.014047 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.023734 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.034935 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.045757 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.055592 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.073327 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4452064c763db39abe3d67a71b8742fb28999485085b61769d2b695eaab2436\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:38Z\\\",\\\"message\\\":\\\" handler 6\\\\nI1125 16:47:38.653396 6105 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653401 6105 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:38.653425 6105 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 16:47:38.653456 6105 factory.go:656] Stopping watch factory\\\\nI1125 16:47:38.653460 6105 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653497 6105 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653428 6105 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 16:47:38.653669 6105 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 16:47:38.653720 6105 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 16:47:38.653993 6105 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] 
failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-ov
errides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.085582 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:46Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.108062 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.108103 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.108113 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.108130 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.108141 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.210632 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.210675 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.210685 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.210700 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.210711 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.314632 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.314732 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.314758 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.314864 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.314936 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.418226 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.418269 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.418301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.418319 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.418330 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.521228 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.521285 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.521297 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.521332 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.521346 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.623796 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.624057 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.624165 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.624236 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.624365 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.726348 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.726413 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.726430 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.726457 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.726477 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.828878 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.828922 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.828934 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.828950 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.828961 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.831175 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:47:46 crc kubenswrapper[4812]: E1125 16:47:46.831310 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.931901 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.931946 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.931957 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.931971 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:46 crc kubenswrapper[4812]: I1125 16:47:46.931980 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:46Z","lastTransitionTime":"2025-11-25T16:47:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.034234 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.034298 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.034309 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.034327 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.034344 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.137224 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.137286 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.137302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.137321 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.137333 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.240202 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.240261 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.240279 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.240302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.240321 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.342935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.342971 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.342982 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.342998 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.343010 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.446221 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.446260 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.446271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.446289 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.446299 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.548254 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.548301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.548312 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.548327 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.548338 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.650356 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.650450 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.650461 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.650483 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.650495 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.753282 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.753360 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.753373 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.753394 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.753450 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.831262 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.831368 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:47 crc kubenswrapper[4812]: E1125 16:47:47.831434 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.831445 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:47 crc kubenswrapper[4812]: E1125 16:47:47.831574 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:47 crc kubenswrapper[4812]: E1125 16:47:47.831801 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.856152 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.856188 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.856198 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.856211 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.856219 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.959629 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.960029 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.960042 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.960064 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:47 crc kubenswrapper[4812]: I1125 16:47:47.960078 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:47Z","lastTransitionTime":"2025-11-25T16:47:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.062134 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.062212 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.062226 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.062250 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.062263 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.164618 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.164661 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.164671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.164687 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.164697 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.266899 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.266938 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.266946 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.266959 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.266969 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.369595 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.369637 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.369646 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.369663 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.369673 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.471661 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.471704 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.471713 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.471727 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.471740 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.573689 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.573724 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.573731 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.573747 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.573760 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.676116 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.676168 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.676181 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.676199 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.676211 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.779260 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.779316 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.779330 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.779350 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.779362 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.830475 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:48 crc kubenswrapper[4812]: E1125 16:47:48.830711 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.882778 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.882851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.882876 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.882904 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.882926 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.986206 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.986263 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.986280 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.986299 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:48 crc kubenswrapper[4812]: I1125 16:47:48.986314 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:48Z","lastTransitionTime":"2025-11-25T16:47:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.089395 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.089442 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.089455 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.089478 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.089490 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.192450 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.192515 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.192548 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.192569 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.192582 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.262113 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:49 crc kubenswrapper[4812]: E1125 16:47:49.262390 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:49 crc kubenswrapper[4812]: E1125 16:47:49.262571 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:47:57.262493855 +0000 UTC m=+52.102635990 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.296854 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.296908 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.296922 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.296943 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.296958 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.399799 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.399863 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.399877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.399896 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.399910 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.502987 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.503053 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.503068 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.503087 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.503098 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.606576 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.606625 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.606640 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.606660 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.606673 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.709558 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.709619 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.709631 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.709653 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.709666 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.812434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.812510 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.812539 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.812563 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.812600 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.830671 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.830823 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.830878 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:49 crc kubenswrapper[4812]: E1125 16:47:49.831062 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:49 crc kubenswrapper[4812]: E1125 16:47:49.831245 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:49 crc kubenswrapper[4812]: E1125 16:47:49.831393 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.916687 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.916784 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.916821 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.916851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:49 crc kubenswrapper[4812]: I1125 16:47:49.916866 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:49Z","lastTransitionTime":"2025-11-25T16:47:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.020611 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.020693 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.020720 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.020753 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.020775 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.124749 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.124810 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.124823 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.124848 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.124865 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.227987 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.228047 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.228057 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.228076 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.228086 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.330639 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.330672 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.330683 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.330696 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.330706 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.434817 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.434863 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.434872 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.434894 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.434905 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.537993 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.538092 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.538113 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.538145 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.538172 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.641626 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.641737 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.641752 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.641787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.641802 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.744639 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.744689 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.744704 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.744726 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.744740 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.830764 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:50 crc kubenswrapper[4812]: E1125 16:47:50.830933 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.831703 4812 scope.go:117] "RemoveContainer" containerID="c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.847572 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.847600 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.847608 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.847624 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.847634 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.849002 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.871103 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.890779 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db8
5c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.908870 4812 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.927061 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: 
I1125 16:47:50.943174 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\"
:\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.951128 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.951181 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.951193 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.951216 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.951229 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:50Z","lastTransitionTime":"2025-11-25T16:47:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.953991 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.968465 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:50 crc kubenswrapper[4812]: I1125 16:47:50.984657 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.001935 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:50Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.016926 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.029735 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.047784 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/me
trics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network 
controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.053691 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.053729 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.053738 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.053752 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.053762 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.064143 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.078976 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.081769 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/1.log" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.085637 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe"} Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.085780 4812 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.093454 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.103513 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.115896 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.130298 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.141872 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 
16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.156828 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.156867 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.156879 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.156901 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.156939 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.161364 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b9009
2272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.173300 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.190451 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.203663 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.220922 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.238593 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.256246 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc 
kubenswrapper[4812]: I1125 16:47:51.259008 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.259088 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.259101 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.259130 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.259149 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.279427 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.296615 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.320550 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.333666 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.356635 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.361460 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.361522 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.361551 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.361574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.361588 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.370990 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.392696 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:51Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.464673 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.464712 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.464721 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.464739 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.464751 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.570671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.570754 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.570772 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.570799 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.570823 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.674256 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.674302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.674313 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.674332 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.674347 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.777020 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.777071 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.777088 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.777119 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.777154 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.831225 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.831226 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:47:51 crc kubenswrapper[4812]: E1125 16:47:51.831436 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.831224 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:51 crc kubenswrapper[4812]: E1125 16:47:51.831638 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:47:51 crc kubenswrapper[4812]: E1125 16:47:51.831786 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.879720 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.879794 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.879818 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.879846 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.879863 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.983770 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.983826 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.983836 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.983853 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:51 crc kubenswrapper[4812]: I1125 16:47:51.983866 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:51Z","lastTransitionTime":"2025-11-25T16:47:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.087047 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.087138 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.087167 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.087230 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.087255 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.091976 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/2.log"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.093065 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/1.log"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.096096 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe" exitCode=1
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.096150 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe"}
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.096192 4812 scope.go:117] "RemoveContainer" containerID="c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.096940 4812 scope.go:117] "RemoveContainer" containerID="4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe"
Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.097115 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"
Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.111962 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.136287 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.154953 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.171997 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.184780 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.189888 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.190047 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.190135 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.190200 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.190256 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.204244 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.216731 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.230033 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.241576 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 
16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.261313 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.274121 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.287508 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.292723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.292753 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.292762 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.292781 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.292793 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.300326 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.314689 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 
2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.333229 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"pod
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.345371 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.363381 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.396070 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.396116 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.396127 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.396144 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.396155 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.499112 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.499170 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.499182 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.499202 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.499216 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.602700 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.602784 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.602809 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.602842 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.602868 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.706934 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.707005 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.707024 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.707053 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.707075 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.723678 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.728686 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.728758 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.728772 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.728800 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.728817 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.742587 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.746619 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.746658 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.746667 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.746681 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.746691 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.761029 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.765492 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.765564 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.765575 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.765593 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.765606 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.785060 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.788976 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.789015 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.789028 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.789046 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.789058 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.802891 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:52Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.803064 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.805351 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.805392 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.805402 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.805417 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.805429 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.830659 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:52 crc kubenswrapper[4812]: E1125 16:47:52.830867 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.908130 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.908166 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.908178 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.908199 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:52 crc kubenswrapper[4812]: I1125 16:47:52.908210 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:52Z","lastTransitionTime":"2025-11-25T16:47:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.011012 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.011075 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.011085 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.011100 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.011110 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.100458 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/2.log" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.113281 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.113301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.113309 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.113320 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.113329 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.214973 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.215025 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.215041 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.215064 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.215080 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.317317 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.317355 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.317364 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.317380 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.317390 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.419405 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.419474 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.419490 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.419514 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.419563 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.522490 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.522555 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.522565 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.522584 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.522594 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.625775 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.625853 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.625877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.625907 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.625924 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.728556 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.728616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.728627 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.728643 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.728654 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:53Z","lastTransitionTime":"2025-11-25T16:47:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.831227 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.831279 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:53 crc kubenswrapper[4812]: I1125 16:47:53.831330 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:53 crc kubenswrapper[4812]: E1125 16:47:53.831382 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:47:53 crc kubenswrapper[4812]: E1125 16:47:53.831463 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:47:53 crc kubenswrapper[4812]: E1125 16:47:53.831549 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[... the NodeNotReady status cycle repeats at 16:47:53.831, 16:47:53.934, 16:47:54.038, 16:47:54.141, 16:47:54.244, 16:47:54.349, 16:47:54.451, 16:47:54.556, 16:47:54.659, and 16:47:54.763 ...]
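The recurring complaint in every cycle above is the empty CNI configuration directory /etc/kubernetes/cni/net.d/ (the path comes straight from the log). A minimal diagnostic sketch in Go: in a healthy cluster the network plugin's daemon (here ovnkube-node) eventually writes a config file into that directory, and runtimes typically accept *.conf, *.conflist, and *.json; the exact extension set is an assumption for illustration.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfDir is the directory the runtime is polling in the entries above.
const cniConfDir = "/etc/kubernetes/cni/net.d"

func main() {
	// The container runtime reports NetworkReady=true once at least one
	// CNI config file appears here.
	var matches []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, err := filepath.Glob(filepath.Join(cniConfDir, pat))
		if err != nil {
			fmt.Fprintln(os.Stderr, "bad pattern:", err)
			os.Exit(1)
		}
		matches = append(matches, m...)
	}
	if len(matches) == 0 {
		fmt.Println("no CNI configuration files found; network plugin not ready")
		os.Exit(1)
	}
	for _, f := range matches {
		fmt.Println("found CNI config:", f)
	}
}
```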
Nov 25 16:47:54 crc kubenswrapper[4812]: I1125 16:47:54.831184 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:47:54 crc kubenswrapper[4812]: E1125 16:47:54.831348 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
[... the NodeNotReady status cycle repeats at 16:47:54.865, 16:47:54.969, 16:47:55.071, 16:47:55.173, 16:47:55.277, 16:47:55.380, 16:47:55.484, 16:47:55.587, 16:47:55.690, and 16:47:55.793 ...]
Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.831477 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.831584 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:47:55 crc kubenswrapper[4812]: E1125 16:47:55.831695 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
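A few entries below, the kubelet declines to restart ovnkube-controller because the pod is in CrashLoopBackOff with a 20 s delay. A minimal sketch of that style of exponential restart back-off, assuming the kubelet's documented defaults (10 s initial delay, doubled per consecutive crash, capped at 5 m; the constants here are illustrative, not read from kubelet source):

```go
package main

import (
	"fmt"
	"time"
)

// crashLoopDelay returns the restart delay after n consecutive crashes,
// mimicking the kubelet's documented crash-loop behaviour: 10s initial
// delay, doubled per crash, capped at 5m. Illustrative constants only.
func crashLoopDelay(n int) time.Duration {
	const (
		initial  = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := initial
	for i := 1; i < n; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash #%d -> back-off %s\n", n, crashLoopDelay(n))
	}
	// crash #2 prints 20s, matching the "back-off 20s restarting failed
	// container=ovnkube-controller" entry just below.
}
```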
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:55 crc kubenswrapper[4812]: E1125 16:47:55.831803 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.832011 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:55 crc kubenswrapper[4812]: E1125 16:47:55.832169 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.846612 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.847435 4812 scope.go:117] "RemoveContainer" containerID="4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe" Nov 25 16:47:55 crc kubenswrapper[4812]: E1125 16:47:55.847620 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.857502 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.874579 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db8
5c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.886466 4812 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.896072 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.896548 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.896593 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.896624 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.896654 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:55Z","lastTransitionTime":"2025-11-25T16:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.904123 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35
825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.919802 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.933081 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.943920 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.957440 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.968627 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.978298 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc 
kubenswrapper[4812]: I1125 16:47:55.994196 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:55Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.999473 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.999634 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.999721 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.999815 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:55 crc kubenswrapper[4812]: I1125 16:47:55.999893 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:55Z","lastTransitionTime":"2025-11-25T16:47:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.007999 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.020704 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.032766 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.044429 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.061024 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e73e40ceff981a3b56eaec0f1efd6bf1fe1e70ea543cda0804aca64fa11449\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"message\\\":\\\"4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 16:47:39.862298 6259 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-multus/multus-admission-controller]} name:Service_openshift-multus/multus-admission-controller_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.119:443: 10.217.5.119:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d4efc4a8-c514-4a6b-901c-2953978b50d3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 16:47:39.862362 6259 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for 
removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\
\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.073406 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.085588 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.101219 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.102344 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.102391 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.102407 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.102434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.102473 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.115829 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.128204 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.139666 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.150200 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.161691 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.179006 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.191465 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db8
5c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.202626 4812 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.204224 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.204251 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.204259 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.204273 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.204284 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.217313 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/hos
t/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7
b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.229936 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run
-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.244483 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.255960 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.268460 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.281166 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.295082 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:56Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.307067 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.307109 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.307118 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.307136 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.307145 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.410334 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.410387 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.410403 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.410424 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.410437 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.512863 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.512917 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.512933 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.512954 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.512970 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.615315 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.615354 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.615364 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.615379 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.615390 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.717416 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.717447 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.717455 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.717469 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.717478 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.820263 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.820344 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.820357 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.820375 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.820387 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.830682 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:56 crc kubenswrapper[4812]: E1125 16:47:56.830858 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.923570 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.923605 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.923616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.923631 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:56 crc kubenswrapper[4812]: I1125 16:47:56.923643 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:56Z","lastTransitionTime":"2025-11-25T16:47:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.026607 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.026658 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.026675 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.026699 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.026717 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.129565 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.129610 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.129624 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.129673 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.129685 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.233408 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.233461 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.233678 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.233697 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.233707 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.336217 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.336274 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.336285 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.336303 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.336327 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.342930 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.343044 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.343094 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:48:13.343079213 +0000 UTC m=+68.183221308 (durationBeforeRetry 16s). 
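The durationBeforeRetry for this mount failure is 16s; the retries recorded below are scheduled at 32s. That doubling is the familiar exponential-backoff pattern; a minimal sketch in Go, where the starting delay and the cap are assumptions for illustration, not the kubelet's actual constants.

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the previous retry delay up to a fixed limit.
func nextDelay(d, limit time.Duration) time.Duration {
	d *= 2
	if d > limit {
		return limit
	}
	return d
}

func main() {
	d := 8 * time.Second
	for i := 0; i < 4; i++ {
		d = nextDelay(d, 2*time.Minute)
		fmt.Println(d) // 16s, 32s, 1m4s, 2m0s
	}
}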
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.439819 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.439911 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.439936 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.439969 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.439996 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.542822 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.542884 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.542901 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.542930 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.542950 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.545379 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.545488 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.545626 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.545635 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.545707 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:48:29.545683571 +0000 UTC m=+84.385825656 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.545723 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:48:29.545716741 +0000 UTC m=+84.385858836 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.645925 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.645997 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.646018 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.646056 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.646084 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.646200 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.646575 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:48:29.646518272 +0000 UTC m=+84.486660367 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.646710 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.646765 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.646901 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.646918 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.646930 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.646970 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:48:29.646960193 +0000 UTC m=+84.487102388 (durationBeforeRetry 32s). 
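Every webhook call in this section fails for the same reason: the certificate presented by the network-node-identity webhook has a NotAfter of 2025-08-24T17:21:41Z, which is earlier than the current time, so TLS verification rejects it as expired. A minimal sketch of that validity-window check with Go's standard library; the path cert.pem is a placeholder.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Load and parse a PEM-encoded certificate.
	data, err := os.ReadFile("cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// The same validity-window comparison the TLS verifier performs.
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	default:
		fmt.Println("certificate is within its validity window")
	}
}

Run against the expired webhook certificate, this prints the same "current time ... is after ..." wording that the verifier reports in the records above.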
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.646902 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.647117 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.647134 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.647228 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 16:48:29.64720772 +0000 UTC m=+84.487349865 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.749190 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.749277 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.749304 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.749338 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.749359 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.831220 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.831273 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.831355 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.831511 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.831705 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:57 crc kubenswrapper[4812]: E1125 16:47:57.831900 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.852721 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.852796 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.852816 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.852843 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.852865 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.956683 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.956763 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.956780 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.956807 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:57 crc kubenswrapper[4812]: I1125 16:47:57.956824 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:57Z","lastTransitionTime":"2025-11-25T16:47:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.045133 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.059505 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.059574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.059590 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.059611 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.059626 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.061308 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.063197 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.076019 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.091061 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.105520 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.122607 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.134452 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.151469 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.161828 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.161876 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.161888 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.161906 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.161919 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.171699 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.186520 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.198252 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.208644 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.220080 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.232967 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.248608 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.264238 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.264285 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.264297 4812 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.264318 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.264331 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.274267 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.286369 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.297448 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:47:58Z is after 2025-08-24T17:21:41Z" Nov 25 
16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.367185 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.367220 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.367229 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.367245 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.367254 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.469977 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.470027 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.470037 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.470055 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.470065 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.573504 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.573612 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.573630 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.573663 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.573684 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.676452 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.676500 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.676515 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.676549 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.676562 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.779632 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.779755 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.779787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.779824 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.779845 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.831509 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:47:58 crc kubenswrapper[4812]: E1125 16:47:58.831867 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.883343 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.883460 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.883480 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.883511 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.883578 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.986093 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.986133 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.986144 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.986157 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:58 crc kubenswrapper[4812]: I1125 16:47:58.986166 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:58Z","lastTransitionTime":"2025-11-25T16:47:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.090617 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.090687 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.090707 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.090738 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.090761 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.194067 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.194135 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.194154 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.194186 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.194207 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.298215 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.298314 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.298354 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.298387 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.298410 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.401520 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.401584 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.401594 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.401611 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.401625 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.505045 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.505084 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.505093 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.505108 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.505117 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.607981 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.608012 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.608020 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.608033 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.608041 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.711658 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.711735 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.711745 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.711763 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.711791 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.814944 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.814975 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.814985 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.815027 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.815041 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.830642 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.830826 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.831002 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:47:59 crc kubenswrapper[4812]: E1125 16:47:59.831000 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:47:59 crc kubenswrapper[4812]: E1125 16:47:59.831312 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:47:59 crc kubenswrapper[4812]: E1125 16:47:59.831324 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.918313 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.918355 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.918365 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.918381 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:47:59 crc kubenswrapper[4812]: I1125 16:47:59.918394 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:47:59Z","lastTransitionTime":"2025-11-25T16:47:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.021195 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.021246 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.021258 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.021283 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.021297 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.124141 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.124222 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.124242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.124274 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.124294 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.227181 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.227234 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.227250 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.227271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.227288 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.330302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.330349 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.330387 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.330405 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.330416 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.433827 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.433897 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.433914 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.433936 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.433955 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.536766 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.536807 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.536818 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.536834 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.536846 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.639083 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.639141 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.639151 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.639172 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.639184 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.742035 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.742118 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.742135 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.742163 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.742178 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.830917 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:00 crc kubenswrapper[4812]: E1125 16:48:00.831174 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.845324 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.845389 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.845400 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.845420 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.845433 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.948075 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.948114 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.948122 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.948136 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:00 crc kubenswrapper[4812]: I1125 16:48:00.948146 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:00Z","lastTransitionTime":"2025-11-25T16:48:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.051135 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.051236 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.051267 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.051303 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.051329 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.153987 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.154036 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.154047 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.154062 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.154073 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.258068 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.258115 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.258124 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.258143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.258153 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.361847 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.361913 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.361930 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.361954 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.361971 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.466909 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.467022 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.467035 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.467062 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.467086 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.571017 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.571074 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.571085 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.571107 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.571122 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.674258 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.674323 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.674342 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.674371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.674392 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.779180 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.779271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.779293 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.779327 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.779349 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.831362 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.831360 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:01 crc kubenswrapper[4812]: E1125 16:48:01.831668 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.831361 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:01 crc kubenswrapper[4812]: E1125 16:48:01.831795 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:01 crc kubenswrapper[4812]: E1125 16:48:01.831944 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.882659 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.882735 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.882756 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.882784 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.882804 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.986374 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.986424 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.986436 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.986459 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:01 crc kubenswrapper[4812]: I1125 16:48:01.986474 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:01Z","lastTransitionTime":"2025-11-25T16:48:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.089993 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.090061 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.090081 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.090114 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.090137 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.193920 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.194008 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.194027 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.194049 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.194063 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.297402 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.297478 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.297503 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.297573 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.297599 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.400289 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.400324 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.400332 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.400346 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.400358 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.503196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.503276 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.503299 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.503335 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.503363 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.606488 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.606566 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.606583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.606606 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.606622 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.709915 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.709973 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.709986 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.710007 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.710018 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.812774 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.812840 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.812856 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.812877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.812895 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.830906 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:02 crc kubenswrapper[4812]: E1125 16:48:02.831160 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.911561 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.911610 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.911620 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.911637 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.911647 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: E1125 16:48:02.926721 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:02Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.931900 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.931954 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.931967 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.931986 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.931998 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: E1125 16:48:02.945067 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:02Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.949330 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.949360 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.949369 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.949383 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.949392 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: E1125 16:48:02.965556 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:02Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.975832 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.975895 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.975906 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.975927 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.975940 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:02 crc kubenswrapper[4812]: E1125 16:48:02.992649 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:02Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.999636 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.999698 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.999711 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.999735 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:02 crc kubenswrapper[4812]: I1125 16:48:02.999752 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:02Z","lastTransitionTime":"2025-11-25T16:48:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:03 crc kubenswrapper[4812]: E1125 16:48:03.013266 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:03Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:03 crc kubenswrapper[4812]: E1125 16:48:03.013390 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.015452 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.015613 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.015731 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.015872 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.015988 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.119501 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.119648 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.119660 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.119682 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.119698 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.222715 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.222814 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.222828 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.222860 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.222874 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.325187 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.325235 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.325251 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.325273 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.325287 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.428085 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.428133 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.428143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.428159 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.428172 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.530652 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.530707 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.530720 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.530749 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.530763 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.633231 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.633285 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.633298 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.633321 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.633334 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.736239 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.736342 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.736353 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.736373 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.736422 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.831356 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.831420 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:03 crc kubenswrapper[4812]: E1125 16:48:03.831487 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.831459 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:03 crc kubenswrapper[4812]: E1125 16:48:03.831765 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:03 crc kubenswrapper[4812]: E1125 16:48:03.831867 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.838690 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.838726 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.838737 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.838755 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.838768 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.941031 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.941073 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.941083 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.941099 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:03 crc kubenswrapper[4812]: I1125 16:48:03.941111 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:03Z","lastTransitionTime":"2025-11-25T16:48:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.044238 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.044282 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.044292 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.044311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.044322 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.146898 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.146936 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.146947 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.146961 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.146971 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.249121 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.249159 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.249178 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.249196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.249207 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.351739 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.352343 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.352356 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.352369 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.352378 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.455773 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.455855 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.455877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.455912 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.455936 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.558534 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.558624 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.558637 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.558656 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.558670 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.661290 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.661337 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.661348 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.661372 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.661387 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.764798 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.764851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.764864 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.764887 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.764902 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.831032 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:48:04 crc kubenswrapper[4812]: E1125 16:48:04.831286 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.868510 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.868583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.868600 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.868622 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.868637 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.971623 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.971670 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.971680 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.971697 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:04 crc kubenswrapper[4812]: I1125 16:48:04.971708 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:04Z","lastTransitionTime":"2025-11-25T16:48:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.075198 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.075236 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.075246 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.075262 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.075273 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.177736 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.177805 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.177821 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.177848 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.177875 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.280877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.280935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.280945 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.280966 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.280978 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.383725 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.383753 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.383762 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.383776 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.383785 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.486374 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.486415 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.486423 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.486442 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.486453 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.588822 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.588853 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.588862 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.588874 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.588883 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.691974 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.692013 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.692024 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.692059 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.692069 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.794335 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.794390 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.794400 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.794421 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.794437 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.830477 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.830577 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:05 crc kubenswrapper[4812]: E1125 16:48:05.830653 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:05 crc kubenswrapper[4812]: E1125 16:48:05.830744 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.830861 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:05 crc kubenswrapper[4812]: E1125 16:48:05.831086 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.845830 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.865298 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf00
92dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.877226 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-c
ni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.887401 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.898047 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.898098 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.898125 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.898143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.898155 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:05Z","lastTransitionTime":"2025-11-25T16:48:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.902592 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.917417 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.930289 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.941390 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z"
Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.951604 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.960642 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.978628 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:05 crc kubenswrapper[4812]: I1125 16:48:05.992147 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:05Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.000585 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.000629 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.000640 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.000655 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.000667 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:06Z","lastTransitionTime":"2025-11-25T16:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.008173 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:06Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.020746 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:06Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.042762 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:06Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.075298 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:06Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.091419 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db8
5c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:06Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.102711 4812 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.102741 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.102748 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.102764 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.102774 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:06Z","lastTransitionTime":"2025-11-25T16:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.103145 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:06Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.205789 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.205859 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.205871 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.205889 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.205900 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:06Z","lastTransitionTime":"2025-11-25T16:48:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 16:48:06 crc kubenswrapper[4812]: I1125 16:48:06.830517 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:48:06 crc kubenswrapper[4812]: E1125 16:48:06.830671 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
Nov 25 16:48:07 crc kubenswrapper[4812]: I1125 16:48:07.831202 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:07 crc kubenswrapper[4812]: I1125 16:48:07.831294 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:07 crc kubenswrapper[4812]: E1125 16:48:07.831419 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:07 crc kubenswrapper[4812]: I1125 16:48:07.831493 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:07 crc kubenswrapper[4812]: E1125 16:48:07.831664 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:48:07 crc kubenswrapper[4812]: E1125 16:48:07.831786 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:48:08 crc kubenswrapper[4812]: I1125 16:48:08.830704 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:48:08 crc kubenswrapper[4812]: E1125 16:48:08.830847 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
Nov 25 16:48:09 crc kubenswrapper[4812]: I1125 16:48:09.831587 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:09 crc kubenswrapper[4812]: I1125 16:48:09.831709 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:09 crc kubenswrapper[4812]: E1125 16:48:09.831740 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:48:09 crc kubenswrapper[4812]: E1125 16:48:09.831865 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:09 crc kubenswrapper[4812]: I1125 16:48:09.832122 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:09 crc kubenswrapper[4812]: E1125 16:48:09.832273 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Has your network provider started?"} Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.630704 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.630748 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.630759 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.630775 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.630784 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:10Z","lastTransitionTime":"2025-11-25T16:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.733971 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.734023 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.734034 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.734055 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.734066 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:10Z","lastTransitionTime":"2025-11-25T16:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.831381 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:10 crc kubenswrapper[4812]: E1125 16:48:10.831792 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.831989 4812 scope.go:117] "RemoveContainer" containerID="4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe" Nov 25 16:48:10 crc kubenswrapper[4812]: E1125 16:48:10.832223 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.836142 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.836193 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.836209 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.836231 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.836246 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:10Z","lastTransitionTime":"2025-11-25T16:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.938326 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.938363 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.938371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.938384 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:10 crc kubenswrapper[4812]: I1125 16:48:10.938392 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:10Z","lastTransitionTime":"2025-11-25T16:48:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 16:48:11 crc kubenswrapper[4812]: I1125 16:48:11.831454 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:11 crc kubenswrapper[4812]: E1125 16:48:11.831612 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:48:11 crc kubenswrapper[4812]: I1125 16:48:11.831817 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:11 crc kubenswrapper[4812]: E1125 16:48:11.831906 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:11 crc kubenswrapper[4812]: I1125 16:48:11.831991 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:11 crc kubenswrapper[4812]: E1125 16:48:11.832158 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.283972 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:13Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.287906 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.287943 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.287953 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.287968 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.287978 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.300580 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:13Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.304371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.304434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.304449 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.304467 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.304500 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.319322 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:13Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.323769 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.323852 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.323875 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.323905 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.323941 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.338752 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:13Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.342199 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.342235 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.342247 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.342264 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.342276 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.357111 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:13Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.357267 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.359015 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.359057 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.359069 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.359088 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.359102 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.421621 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.421750 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.421832 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:48:45.421812153 +0000 UTC m=+100.261954258 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.461334 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.461373 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.461382 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.461398 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.461407 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.563973 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.564009 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.564018 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.564034 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.564044 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.666518 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.666580 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.666592 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.666608 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.666620 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.768858 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.768904 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.768914 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.768928 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.768938 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.830959 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.831015 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.831078 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.831141 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.831302 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:13 crc kubenswrapper[4812]: E1125 16:48:13.831350 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.870876 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.870906 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.870915 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.870928 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.870938 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.972733 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.972768 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.972779 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.972797 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:13 crc kubenswrapper[4812]: I1125 16:48:13.972811 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:13Z","lastTransitionTime":"2025-11-25T16:48:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.075376 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.075405 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.075414 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.075427 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.075435 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.177358 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.177404 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.177415 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.177433 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.177448 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.280337 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.280418 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.280436 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.280467 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.280487 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.382421 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.382492 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.382503 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.382528 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.382586 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.484515 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.484569 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.484579 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.484593 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.484601 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.587093 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.587134 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.587145 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.587164 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.587176 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.689437 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.689472 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.689481 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.689498 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.689517 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.792097 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.792128 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.792136 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.792152 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.792161 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.830746 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:14 crc kubenswrapper[4812]: E1125 16:48:14.830909 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.894799 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.894852 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.894866 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.894886 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.894898 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.997963 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.997999 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.998007 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.998022 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:14 crc kubenswrapper[4812]: I1125 16:48:14.998031 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:14Z","lastTransitionTime":"2025-11-25T16:48:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.100619 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.100645 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.100653 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.100664 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.100676 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.202808 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.202841 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.202851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.202866 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.202876 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.305219 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.305474 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.305578 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.305665 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.305731 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.407413 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.407453 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.407465 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.407483 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.407496 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.509240 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.509275 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.509284 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.509299 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.509310 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.611196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.611249 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.611258 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.611273 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.611282 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.713359 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.713421 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.713434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.713449 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.713461 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.815954 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.815992 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.816004 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.816019 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.816029 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.830745 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:15 crc kubenswrapper[4812]: E1125 16:48:15.830826 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.830944 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:15 crc kubenswrapper[4812]: E1125 16:48:15.830992 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.831103 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:15 crc kubenswrapper[4812]: E1125 16:48:15.831160 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.845550 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.857277 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.866627 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.875263 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.891781 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.902772 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.915053 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 
2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.918203 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.918241 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.918255 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.918273 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.918286 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:15Z","lastTransitionTime":"2025-11-25T16:48:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.926215 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994
82919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.946887 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\
\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0
7b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.961988 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.973970 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.983983 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:15 crc kubenswrapper[4812]: I1125 16:48:15.994270 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:15Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.008613 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:16Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.019811 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib
/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:16Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.020049 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.020077 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.020086 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.020099 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.020108 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.030822 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:16Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.042099 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:16Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.051962 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:16Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.121899 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.121974 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.121986 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.122004 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 
16:48:16.122018 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.224229 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.224271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.224291 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.224305 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.224315 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.326790 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.326832 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.326841 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.326856 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.326867 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.428702 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.428747 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.428761 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.428777 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.428805 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.531597 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.531631 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.531640 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.531654 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.531662 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.634415 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.634449 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.634458 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.634472 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.634481 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.736000 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.736223 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.736305 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.736373 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.736434 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.831003 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:16 crc kubenswrapper[4812]: E1125 16:48:16.831304 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.839265 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.839301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.839313 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.839326 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.839390 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.941734 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.941778 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.941788 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.941805 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:16 crc kubenswrapper[4812]: I1125 16:48:16.941817 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:16Z","lastTransitionTime":"2025-11-25T16:48:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.044338 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.044390 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.044403 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.044425 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.044440 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.147290 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.147343 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.147355 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.147374 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.147386 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.179693 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/0.log" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.179740 4812 generic.go:334] "Generic (PLEG): container finished" podID="3a156756-3629-4bed-8de0-1019226b7f04" containerID="720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824" exitCode=1 Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.179773 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerDied","Data":"720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.180120 4812 scope.go:117] "RemoveContainer" containerID="720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.201335 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"star
tedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e491
41104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.216030 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.227516 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 
16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.241615 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:17Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.249560 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.249705 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.249768 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.249832 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.249908 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.253169 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.267853 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.281551 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.294509 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.307886 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.318864 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.331226 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.349152 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6
d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.352938 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.352984 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.352993 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.353009 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.353021 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.361853 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.375270 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.387981 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.402627 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.412018 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.422159 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:17Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.454474 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.454514 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.454546 4812 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.454563 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.454574 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.557173 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.557239 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.557252 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.557271 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.557282 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.659077 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.659405 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.659425 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.659441 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.659452 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.761811 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.761855 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.761867 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.761885 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.761897 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.831601 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.831649 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:17 crc kubenswrapper[4812]: E1125 16:48:17.831813 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.831860 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:17 crc kubenswrapper[4812]: E1125 16:48:17.831975 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:17 crc kubenswrapper[4812]: E1125 16:48:17.832100 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.864037 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.864094 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.864117 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.864142 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.864160 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.966331 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.966366 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.966374 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.966386 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:17 crc kubenswrapper[4812]: I1125 16:48:17.966395 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:17Z","lastTransitionTime":"2025-11-25T16:48:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.068970 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.069011 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.069020 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.069034 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.069044 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.170987 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.171024 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.171034 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.171051 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.171064 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.183923 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/0.log" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.184106 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerStarted","Data":"1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.197470 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.208292 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.1
68.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.236959 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33
e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.252791 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.264618 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.273516 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.273689 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.273775 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.273907 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.274001 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.275636 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.284177 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.296791 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.309237 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.318035 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.331449 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.344306 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.356312 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.367744 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.376559 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.376686 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.376777 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.376868 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.376968 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.381279 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.392221 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.409206 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.421418 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:18Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.479441 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.479492 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.479509 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.479558 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.479575 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.585449 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.585504 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.585513 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.585546 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.585558 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.687575 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.687619 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.687629 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.687645 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.687655 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.789769 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.789810 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.789821 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.789837 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.789847 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.830665 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:18 crc kubenswrapper[4812]: E1125 16:48:18.830791 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.892648 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.892693 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.892704 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.892721 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.892734 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.994425 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.994456 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.994464 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.994477 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:18 crc kubenswrapper[4812]: I1125 16:48:18.994486 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:18Z","lastTransitionTime":"2025-11-25T16:48:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.096234 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.096284 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.096295 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.096311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.096324 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.198434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.198475 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.198486 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.198502 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.198513 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.300445 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.300491 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.300501 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.300519 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.300551 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.402846 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.402886 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.402896 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.402908 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.402917 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.505844 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.505886 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.505895 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.505911 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.505922 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.608835 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.608879 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.608891 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.608907 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.608918 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.711042 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.711090 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.711100 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.711118 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.711130 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.813372 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.813416 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.813428 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.813442 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.813451 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.831025 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.831066 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:19 crc kubenswrapper[4812]: E1125 16:48:19.831131 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.831217 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:19 crc kubenswrapper[4812]: E1125 16:48:19.831366 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:19 crc kubenswrapper[4812]: E1125 16:48:19.831472 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.916191 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.916426 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.916531 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.916610 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:19 crc kubenswrapper[4812]: I1125 16:48:19.916667 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:19Z","lastTransitionTime":"2025-11-25T16:48:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.018964 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.019013 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.019024 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.019041 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.019054 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.121101 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.121157 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.121178 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.121196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.121207 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.223336 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.223394 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.223403 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.223417 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.223431 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.325452 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.325498 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.325506 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.325520 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.325555 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.428664 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.428731 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.428752 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.428774 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.428791 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.530871 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.530906 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.530915 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.530930 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.530939 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.633603 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.633640 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.633651 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.633665 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.633674 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.736117 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.736458 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.736657 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.736811 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.736949 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.831417 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:20 crc kubenswrapper[4812]: E1125 16:48:20.831653 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.843624 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.843695 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.843724 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.843748 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.843765 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.946819 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.946857 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.946868 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.946885 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:20 crc kubenswrapper[4812]: I1125 16:48:20.946897 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:20Z","lastTransitionTime":"2025-11-25T16:48:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.050158 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.050236 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.050260 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.050288 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.050309 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.153290 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.153367 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.153391 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.153422 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.153443 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.256298 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.256370 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.256392 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.256420 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.256442 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.359081 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.359129 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.359139 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.359155 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.359165 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.461783 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.461831 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.461842 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.461857 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.461868 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.564238 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.564279 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.564292 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.564311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.564323 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.667259 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.667306 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.667372 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.667414 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.667436 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.769137 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.769171 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.769179 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.769192 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.769201 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.831022 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.831098 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.831022 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:21 crc kubenswrapper[4812]: E1125 16:48:21.831241 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:21 crc kubenswrapper[4812]: E1125 16:48:21.831428 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:21 crc kubenswrapper[4812]: E1125 16:48:21.831517 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.872161 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.872203 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.872216 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.872233 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.872245 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.974574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.974634 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.974643 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.974660 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:21 crc kubenswrapper[4812]: I1125 16:48:21.974670 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:21Z","lastTransitionTime":"2025-11-25T16:48:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.077444 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.077490 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.077501 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.077521 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.077550 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.180485 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.180551 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.180562 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.180583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.180597 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.283021 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.283069 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.283080 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.283101 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.283115 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.385617 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.385659 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.385688 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.385708 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.385721 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.488294 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.488350 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.488360 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.488380 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.488392 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.590851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.590895 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.590907 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.590923 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.590934 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.693925 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.693966 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.693975 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.693988 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.693996 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.796396 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.796450 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.796466 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.796485 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.796497 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.830453 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:22 crc kubenswrapper[4812]: E1125 16:48:22.830599 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.899292 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.899331 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.899343 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.899363 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:22 crc kubenswrapper[4812]: I1125 16:48:22.899378 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:22Z","lastTransitionTime":"2025-11-25T16:48:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.002041 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.002102 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.002119 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.002140 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.002156 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.105689 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.105745 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.105765 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.105789 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.105806 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.208221 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.208267 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.208281 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.208301 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.208315 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.315275 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.315354 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.315685 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.315706 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.315719 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.418665 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.418732 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.418756 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.418787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.418811 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.521566 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.521611 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.521622 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.521638 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.521651 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.624641 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.624702 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.624713 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.624731 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.624742 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.693149 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.693224 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.693242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.693269 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.693288 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.708718 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:23Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.713892 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.713945 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.714008 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.714037 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.714108 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.727794 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:23Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.731850 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.731900 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.731912 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.731928 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.731938 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.742724 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:23Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.746698 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.746758 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.746773 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.746794 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.746810 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.760041 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:23Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.764117 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.764162 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.764170 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.764190 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.764202 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.776922 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:23Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.777109 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.778995 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.779042 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.779052 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.779068 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.779078 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.830718 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.830778 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.830792 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.830855 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.831132 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:23 crc kubenswrapper[4812]: E1125 16:48:23.831187 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.881816 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.881885 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.881902 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.881930 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.881944 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.985143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.985186 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.985197 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.985215 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:23 crc kubenswrapper[4812]: I1125 16:48:23.985226 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:23Z","lastTransitionTime":"2025-11-25T16:48:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.088676 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.088737 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.088754 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.088781 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.088799 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.191504 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.191576 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.191585 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.191598 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.191607 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.293932 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.293983 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.293994 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.294009 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.294019 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.396114 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.396146 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.396154 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.396168 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.396179 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.499356 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.499410 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.499422 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.499442 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.499454 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.601796 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.601843 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.601851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.601865 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.601874 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.704433 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.704564 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.704574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.704594 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.704611 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.807102 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.807180 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.807206 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.807237 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.807260 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.830571 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:24 crc kubenswrapper[4812]: E1125 16:48:24.831043 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.831347 4812 scope.go:117] "RemoveContainer" containerID="4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.910613 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.910652 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.910662 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.910683 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:24 crc kubenswrapper[4812]: I1125 16:48:24.910694 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:24Z","lastTransitionTime":"2025-11-25T16:48:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.013671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.013700 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.013709 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.013723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.013733 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.116671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.116710 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.116719 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.116734 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.116747 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.208288 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/2.log" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.213441 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.214448 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.220006 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.220053 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.220065 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.220086 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.220100 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.234297 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.251981 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.268274 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.291129 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.307181 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.324058 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.324103 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.324113 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.324133 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.324143 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.325636 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.342682 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 
2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.357737 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.376091 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.391266 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.406952 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.419332 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.428597 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.428647 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.428658 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.428682 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.428695 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.432293 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.456298 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.472795 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.494607 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.507880 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db8
5c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.524783 4812 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.531841 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.531897 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.531912 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.531932 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.531945 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.634761 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.634832 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.634841 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.634861 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.634872 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.737266 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.737328 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.737342 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.737371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.737386 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.830993 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.831045 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.831024 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:25 crc kubenswrapper[4812]: E1125 16:48:25.831232 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:25 crc kubenswrapper[4812]: E1125 16:48:25.831427 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:25 crc kubenswrapper[4812]: E1125 16:48:25.831734 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.839840 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.839882 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.839895 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.839914 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.839928 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.850665 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.869290 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.885194 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.903694 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 
16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.920984 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.939487 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.945181 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.945229 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.945242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.945269 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.945286 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:25Z","lastTransitionTime":"2025-11-25T16:48:25Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.953318 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.969259 4812 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.986749 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:25 crc kubenswrapper[4812]: I1125 16:48:25.999304 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.015053 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.028738 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.041352 4812 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.048189 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.048242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.048261 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.048288 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.048304 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.063556 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: 
failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\"
:\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.078214 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.090580 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.101727 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.110748 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.150369 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.150411 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.150422 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.150436 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.150449 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.219151 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/3.log" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.220007 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/2.log" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.222723 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" exitCode=1 Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.222760 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.222808 4812 scope.go:117] "RemoveContainer" containerID="4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.224665 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:48:26 crc kubenswrapper[4812]: E1125 16:48:26.224902 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.244627 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.253780 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.253833 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.253847 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.253866 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.253879 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.258703 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.272301 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 
2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.290280 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.301179 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.313117 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.328750 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.339667 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.350576 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.356143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.356168 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.356176 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.356189 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.356234 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.360829 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.371468 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.388199 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d370476c9f3acb7ba10ed410262960ab3d86cd6d4593f17c0081f82e9d2cabe\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:47:51Z\\\",\\\"message\\\":\\\"ork-check-target on namespace openshift-network-diagnostics for network=default : 6.828662ms\\\\nI1125 16:47:51.741645 6467 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}\\\\nI1125 16:47:51.741658 6467 services_controller.go:360] Finished syncing service metrics on namespace openshift-service-ca-operator for network=default : 7.967222ms\\\\nI1125 16:47:51.746551 6467 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 16:47:51.746632 6467 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 16:47:51.748460 6467 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 16:47:51.748606 6467 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 16:47:51.748659 6467 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 16:47:51.748732 6467 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 16:47:51.748798 6467 factory.go:656] Stopping watch factory\\\\nI1125 16:47:51.748841 6467 ovnkube.go:599] Stopped ovnkube\\\\nI1125 16:47:51.748904 6467 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 16:47:51.749041 6467 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:25Z\\\",\\\"message\\\":\\\"anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z]\\\\nI1125 16:48:25.751237 6834 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 16:48:25.751254 6834 obj_retry.go:365] Adding new object: *v1.Pod 
openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 16:48:25.751215 6834 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:m\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:48:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\
\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.401733 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.413779 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.427403 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.446977 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\
\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f
53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.457935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.457960 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.457967 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.457980 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.457989 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.459917 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.471508 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:26Z is after 2025-08-24T17:21:41Z" Nov 25 
16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.560089 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.560124 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.560131 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.560145 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.560157 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.662806 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.662835 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.662842 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.662855 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.662863 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.765578 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.765625 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.765641 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.765662 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.765680 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.830899 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:26 crc kubenswrapper[4812]: E1125 16:48:26.831038 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.867573 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.867604 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.867612 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.867627 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.867636 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.969935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.969986 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.970001 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.970021 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:26 crc kubenswrapper[4812]: I1125 16:48:26.970036 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:26Z","lastTransitionTime":"2025-11-25T16:48:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.072723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.072763 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.072772 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.072787 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.072797 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.174916 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.174962 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.174978 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.174996 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.175012 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.226334 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/3.log" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.229955 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:48:27 crc kubenswrapper[4812]: E1125 16:48:27.230140 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.242220 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identi
ty-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.263133 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\
\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9
ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.276255 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.277256 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.277291 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.277300 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.277318 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.277329 4812 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.288476 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.302655 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed8145
1ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTim
e\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.317457 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.327643 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.339118 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.350987 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.363304 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.376219 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.379390 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.379431 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.379441 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.379456 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.379466 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.388055 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.398290 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.415234 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:25Z\\\",\\\"message\\\":\\\"anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z]\\\\nI1125 16:48:25.751237 6834 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 16:48:25.751254 6834 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 16:48:25.751215 6834 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:m\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:48:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.427178 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.438943 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.451752 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.462386 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:27Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.481082 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.481117 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.481125 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.481137 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.481146 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.584018 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.584080 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.584090 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.584103 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.584111 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.687855 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.687894 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.687904 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.687920 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.687939 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:27Z","lastTransitionTime":"2025-11-25T16:48:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.831407 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.831469 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:27 crc kubenswrapper[4812]: E1125 16:48:27.831609 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:27 crc kubenswrapper[4812]: E1125 16:48:27.831694 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:48:27 crc kubenswrapper[4812]: I1125 16:48:27.831763 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:27 crc kubenswrapper[4812]: E1125 16:48:27.831960 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... identical node-status heartbeat blocks repeat at 16:48:27.894 and 16:48:27.996 ...]
[... identical node-status heartbeat blocks repeat roughly every 100ms from 16:48:28.099 through 16:48:28.818 ...]
Nov 25 16:48:28 crc kubenswrapper[4812]: I1125 16:48:28.830760 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:48:28 crc kubenswrapper[4812]: E1125 16:48:28.830957 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
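[Note: each "Node became not ready" record above carries a structured Ready condition. A self-contained Go sketch that decodes one of the condition objects exactly as printed in this log; the struct mirrors the JSON keys seen here instead of importing the Kubernetes API types:]

package main

import (
	"encoding/json"
	"fmt"
)

// Field names mirror the condition JSON in the heartbeat records above.
type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:28Z","lastTransitionTime":"2025-11-25T16:48:28Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s since %s: %s\n", c.Type, c.Status, c.LastTransitionTime, c.Reason)
}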
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:28 crc kubenswrapper[4812]: I1125 16:48:28.922240 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:28 crc kubenswrapper[4812]: I1125 16:48:28.922355 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:28 crc kubenswrapper[4812]: I1125 16:48:28.922393 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:28 crc kubenswrapper[4812]: I1125 16:48:28.922427 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:28 crc kubenswrapper[4812]: I1125 16:48:28.922450 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:28Z","lastTransitionTime":"2025-11-25T16:48:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.025311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.025349 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.025359 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.025375 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.025385 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.127826 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.127927 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.127940 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.127960 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.127972 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.230486 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.230520 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.230551 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.230574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.230585 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.333504 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.333562 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.333573 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.333586 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.333596 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.436076 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.436118 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.436130 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.436144 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.436153 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.538153 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.538200 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.538215 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.538237 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.538250 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.598153 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.598245 4812 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.598312 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.598298521 +0000 UTC m=+148.438440616 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.598321 4812 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.598245 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.598358 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.598347763 +0000 UTC m=+148.438489858 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.640690 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.640732 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.640740 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.640754 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.640765 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:29Z","lastTransitionTime":"2025-11-25T16:48:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
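[Note: the "No retries permitted until ... (durationBeforeRetry 1m4s)" lines show the mount operations being rescheduled with a growing delay. 1m4s = 64s = 500ms * 2^7, which is consistent with a doubling backoff from an initial 500ms; the exact kubelet constants are an assumption here, so the sketch below is illustrative only:]

package main

import (
	"fmt"
	"time"
)

// Illustrative doubling backoff with a cap. The 1m4s seen in the log fits a
// 500ms initial delay doubled on each failure; initial/cap values are assumed.
func nextBackoff(cur, initial, max time.Duration) time.Duration {
	if cur == 0 {
		return initial
	}
	next := cur * 2
	if next > max {
		return max
	}
	return next
}

func main() {
	d := time.Duration(0)
	for i := 1; i <= 9; i++ {
		d = nextBackoff(d, 500*time.Millisecond, 2*time.Minute)
		fmt.Printf("failure %d -> retry in %s\n", i, d) // failure 8 prints 1m4s
	}
}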
Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.699692 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.699819 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.699845 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.699951 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.699919767 +0000 UTC m=+148.540061872 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700021 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700059 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700072 4812 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700134 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.700119592 +0000 UTC m=+148.540261687 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700260 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700378 4812 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700413 4812 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.700584 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.700509162 +0000 UTC m=+148.540651297 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
[... identical node-status heartbeat block repeats at 16:48:29.743 ...]
Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.831412 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
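[Note: the kube-api-access-* failures above are projected volumes, which compose several sources (here the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps named in the errors) and fail as a whole while any source is still unseen ("not registered") by the kubelet. A self-contained Go sketch of that all-or-nothing aggregation; resolve and the registered map are stand-ins for the kubelet's object store, not its API:]

package main

import (
	"errors"
	"fmt"
)

// Stand-in for the kubelet's view of API objects; empty to reproduce the
// "not registered" failures above.
var registered = map[string]bool{}

func resolve(key string) error {
	if !registered[key] {
		return fmt.Errorf("object %q not registered", key)
	}
	return nil
}

func main() {
	// Source ConfigMaps taken from the errors in this log.
	var errs []error
	for _, src := range []string{
		"openshift-network-diagnostics/kube-root-ca.crt",
		"openshift-network-diagnostics/openshift-service-ca.crt",
	} {
		if err := resolve(src); err != nil {
			errs = append(errs, err)
		}
	}
	// The projected volume only materializes when every source resolves.
	if err := errors.Join(errs...); err != nil {
		fmt.Println("MountVolume.SetUp failed:", err)
	}
}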
Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.831423 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.831736 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:29 crc kubenswrapper[4812]: I1125 16:48:29.831455 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.831868 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:48:29 crc kubenswrapper[4812]: E1125 16:48:29.831907 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[... identical node-status heartbeat blocks repeat roughly every 100ms from 16:48:29.845 through 16:48:30.775 ...]
Nov 25 16:48:30 crc kubenswrapper[4812]: I1125 16:48:30.831302 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:48:30 crc kubenswrapper[4812]: E1125 16:48:30.831504 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125"
[... identical node-status heartbeat blocks repeat at 16:48:30.878, 16:48:30.980, and roughly every 100ms from 16:48:31.084 through 16:48:31.809 ...]
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.831195 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.831241 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.831420 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 16:48:31 crc kubenswrapper[4812]: E1125 16:48:31.831521 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 16:48:31 crc kubenswrapper[4812]: E1125 16:48:31.831696 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 16:48:31 crc kubenswrapper[4812]: E1125 16:48:31.831809 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.843013 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.912960 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.913026 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.913035 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.913057 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 16:48:31 crc kubenswrapper[4812]: I1125 16:48:31.913070 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:31Z","lastTransitionTime":"2025-11-25T16:48:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.016613 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.016689 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.016703 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.016723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.016736 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.120351 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.120430 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.120453 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.120480 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.120497 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.223672 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.223720 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.223731 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.223753 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.223767 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.327564 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.327640 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.327666 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.327706 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.327734 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.430902 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.430956 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.430970 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.430999 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.431015 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.534015 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.534381 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.534400 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.534434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.534452 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.638266 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.638338 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.638356 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.638387 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.638410 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.742486 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.742601 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.742627 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.742656 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.742674 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.830780 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:32 crc kubenswrapper[4812]: E1125 16:48:32.830988 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.846192 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.846325 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.846352 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.846385 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.846407 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.949962 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.950030 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.950050 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.950078 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:32 crc kubenswrapper[4812]: I1125 16:48:32.950096 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:32Z","lastTransitionTime":"2025-11-25T16:48:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.054101 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.054204 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.054224 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.054255 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.054279 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.157068 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.157124 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.157135 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.157154 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.157166 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.259681 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.259728 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.259740 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.259757 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.259767 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.363002 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.363050 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.363063 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.363080 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.363090 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.465749 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.465795 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.465806 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.465824 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.465834 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.569596 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.569671 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.569685 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.569712 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.569731 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.672461 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.672578 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.672599 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.672627 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.672644 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.776006 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.776069 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.776081 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.776105 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.776119 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.830558 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:33 crc kubenswrapper[4812]: E1125 16:48:33.830767 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.831107 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:33 crc kubenswrapper[4812]: E1125 16:48:33.831213 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.831408 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:33 crc kubenswrapper[4812]: E1125 16:48:33.831478 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.878968 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.879041 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.879067 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.879100 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.879127 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.982512 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.982700 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.982730 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.982762 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:33 crc kubenswrapper[4812]: I1125 16:48:33.982789 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:33Z","lastTransitionTime":"2025-11-25T16:48:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.076103 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.076159 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.076172 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.076193 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.076206 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.091063 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:34Z is after 2025-08-24T17:21:41Z"
Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.095786 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.095825 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.095837 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.095859 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.095870 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.110677 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.115270 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.115326 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.115337 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.115365 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.115380 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.127564 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.132326 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.132369 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.132379 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.132398 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.132411 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.152612 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.157583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.157625 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.157634 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.157650 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.157662 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.172655 4812 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"42bff2ed-94cf-457c-8bcf-017111af962a\\\",\\\"systemUUID\\\":\\\"93542aec-3cae-4037-9cb4-28e49d8b2f68\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:34Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.172819 4812 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.174764 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.174799 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.174812 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.174836 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.174850 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.277782 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.277846 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.277861 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.277885 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.277901 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.380668 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.380807 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.380828 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.380861 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.380888 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.484150 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.484206 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.484216 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.484235 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.484247 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.587892 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.587979 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.587993 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.588023 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.588040 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.691297 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.691405 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.691416 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.691434 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.691447 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.794275 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.794324 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.794335 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.794356 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.794368 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.830962 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:34 crc kubenswrapper[4812]: E1125 16:48:34.831186 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.897109 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.897184 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.897207 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.897243 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:34 crc kubenswrapper[4812]: I1125 16:48:34.897267 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:34Z","lastTransitionTime":"2025-11-25T16:48:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.001382 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.001457 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.001470 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.001489 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.001500 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.105168 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.105227 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.105242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.105266 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.105283 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.209265 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.209744 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.209845 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.210138 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.210219 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.312894 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.312955 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.312966 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.312983 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.312993 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.416158 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.416225 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.416240 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.416267 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.416281 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.519604 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.519672 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.519686 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.519712 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.519727 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.622637 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.622673 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.622690 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.622710 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.622722 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.725548 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.725597 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.725610 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.725626 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.725637 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.828790 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.828836 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.829038 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.829056 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.829067 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.830694 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.830736 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.830694 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:35 crc kubenswrapper[4812]: E1125 16:48:35.830894 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:35 crc kubenswrapper[4812]: E1125 16:48:35.830950 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:35 crc kubenswrapper[4812]: E1125 16:48:35.831081 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.843300 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c74c77ad-3905-40bc-8f24-78ace63cb229\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://53487d51602502ad1f7e429507c24ebbffa5a6235663c6c8de0d67240a2c9bee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bfc00f90488779a576e3485989a9abc5251762db849ed9f317f7556d2f5d69c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bfc00f90488779a576e3485989a9abc5251762db849ed9f317f7556d2f5d69c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 
2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.860838 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c0c9f9c59919cdd2cbb57396506ce83265ac6b94d34fe2afd36b73a066db9eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.876671 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.893872 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78c00c81fd5911a4d89ffcb8e555dbe88604cc35b9095a07a9c4d791d26ec9ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.908449 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dgtfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"889fccf3-a82f-469b-97d3-094dc96045d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f899009f11ec708b44a35ba4f97660882a13fbba1dddd79ff82723d53aa30d4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4vvd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dgtfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.924476 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-99qrk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a7a3183-ff61-40f7-aa03-af1e5c4252f1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67213ffbc314ee091b39df4df290d4cbe30d310a724e99a5d797592d9b0a333c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ddnk7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-99qrk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.931071 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.931100 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.931108 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.931123 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.931132 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:35Z","lastTransitionTime":"2025-11-25T16:48:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.944435 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:25Z\\\",\\\"message\\\":\\\"anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:25Z is after 2025-08-24T17:21:41Z]\\\\nI1125 16:48:25.751237 6834 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 16:48:25.751254 6834 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI1125 16:48:25.751215 6834 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} 
vips:{GoMap:m\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:48:24Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\
"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtpr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hwqsk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.958892 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96c426b077dd62b031695f9cf5fc3d50221472e524a3190ccfabfa136cac212e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f6f9d45f8083070bcf33a1a32213c5e141cfa4acabc9af78716968e433241cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:35 crc kubenswrapper[4812]: I1125 16:48:35.980286 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c2ba033-91e8-49d8-8474-ebc2dfaeef15\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://179d394510edea595ed1ac39619911e2167f2b9cb5d228635609760932f428e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://effc25d4eb530d685c8a7a0acebd25c418c3fc3675b367fe9ab49ae7c22a1cf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7668dab7cab029814b4edd7225c0a6fac9481f4e74f0ab906661b28775cdde45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://69a98172920b9a903cb0defe4630d0c6c1bb43d
466ff352eb4d60e36fa18ea23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96c0e65aabd958b32841ea475624de4b58ff5c8690a5c767f6548872f6cc7ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21cc30f1e21f20bd2db4b5879a0138c8e37718e49141104919f2a62125f71b10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53879b6504174e7d31149ef77734471798f4b037509a9ee5efd545d3dbe55c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ea7d420c1bc156c7ec592d964fe48c423540afdab8b94ebf27571755c23048b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:35Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.005440 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bacb2994-771c-4add-8b78-afff14608f76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a6fbbed9c2e9840fbb82a9f73110db8
5c3db74ad734ee4fb8733e1f02434efe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1125 16:47:19.291002 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 16:47:19.292670 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2843791565/tls.crt::/tmp/serving-cert-2843791565/tls.key\\\\\\\"\\\\nI1125 16:47:25.694929 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 16:47:25.697343 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 16:47:25.697361 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 16:47:25.697377 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 16:47:25.697382 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 16:47:25.704824 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1125 16:47:25.704838 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1125 16:47:25.704852 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704858 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 16:47:25.704863 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 16:47:25.704867 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 16:47:25.704871 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 16:47:25.704875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1125 16:47:25.707641 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.021726 4812 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd450cbc-02a7-4b95-8c0f-455df0f1f996\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b3be9778300f71493b3018d697ac9931b4825e96b56386e2d83ad9e75accbd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://067be8c1425967cc2944cc42abc9ab532d98fcc21d129ea57fab364c1aa83d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-k9kb5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2fft\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.033924 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.034069 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.034209 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.034341 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.034830 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.039165 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-82fvc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbb57832-3993-492b-80c9-a6a61891a125\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8dqsg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-82fvc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.059894 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3da77f06-36fd-465a-b764-fabfaca65715\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc3351f45003a0574af7993746039012d2cdb2d17ac1dd8b1f8b629dd4d921f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d11e018db2635e10a6a329a03047f13b3ddf47115077c36a596b76bdc43275c4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c173b4b996674926da742cdbbfe34c03e6711cf1005fff5faf16a266cfffec0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.074693 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d8c88eb3-2223-46c2-ab1c-470a48c53e7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2238540e1f8967fe1d9e64bbc0961e190c011025fe10f4f7757aaaa03f690117\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://396f152de0c7b552d1179bebce809cdcd8a51566b5a4c977b615df21191fd4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d869ab1ee5c083df5c5657f05d76a4e1ff0427f9b01b1eb26a0002d11ed5204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5927f6ba428b58fbc20621d6c32b9205b2d33526975e335ed6f5c642481453f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.087091 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.099471 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:25Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.110925 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ed911cf-2139-4b12-84ba-af635585ba29\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b0d9cef2c2ebce63dec5606b3f472a8ac27c7d5353c9e5ea12a9243e699ecb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhsb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lcgpx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.125663 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gljt8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eaeac0de-94b4-43d0-b72f-3a70c6d348c6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d398af8b4b16b15baa75821d0c30b6a72100aac425aca4e87fe9c1c2412673a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:47:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://571e1328a3d8bf0092dfc6468ea006b81d7cfa65a954f3f100edfa835d6bee19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34ad4544726de20ff6ea58dc055e23ee43fa7afac4b683fc8f3d7bf5bd813bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5bf04faa4ce9884ec922d7bbffad880d0425269eabae75ca4172f339121d7e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4de99b035399bef0fe7b83bc130532c9eff769a1f668e388fce316af154db5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f5a8dbc3ba39ee88ce797ca17acfd29d912e2fb6905e9ad8fb240d22ac397ce5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b62cae756e4b2341ea61b4c95cd41c56b2aab9c8f63de26e372cdd756acdb68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T16:47:35Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gmtjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gljt8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.137500 4812 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m7ndd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a156756-3629-4bed-8de0-1019226b7f04\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:47:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T16:48:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T16:48:16Z\\\",\\\"message\\\":\\\"2025-11-25T16:47:31+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9\\\\n2025-11-25T16:47:31+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_de7444d5-6a19-45c4-a479-441123e1e3b9 to /host/opt/cni/bin/\\\\n2025-11-25T16:47:31Z [verbose] multus-daemon started\\\\n2025-11-25T16:47:31Z [verbose] Readiness Indicator file check\\\\n2025-11-25T16:48:16Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T16:47:29Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T16:48:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chd8d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T16:47:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m7ndd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T16:48:36Z is after 2025-08-24T17:21:41Z" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.139939 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.139983 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.139997 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.140014 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.140026 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.241673 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.241718 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.241731 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.241746 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.241759 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.344114 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.344149 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.344160 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.344175 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.344186 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.447079 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.447131 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.447141 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.447165 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.447176 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.549312 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.549359 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.549369 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.549384 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.549394 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.652867 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.652920 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.652932 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.652954 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.652965 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.755064 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.755100 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.755108 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.755122 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.755132 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.831615 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:36 crc kubenswrapper[4812]: E1125 16:48:36.831862 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.858096 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.858148 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.858158 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.858176 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.858188 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.961173 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.961571 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.961754 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.962158 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:36 crc kubenswrapper[4812]: I1125 16:48:36.962328 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:36Z","lastTransitionTime":"2025-11-25T16:48:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.066137 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.066847 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.066915 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.066943 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.066964 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.170038 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.170091 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.170106 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.170136 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.170152 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.272272 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.272324 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.272333 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.272350 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.272360 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.374804 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.374855 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.374868 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.374894 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.374907 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.477238 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.477308 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.477322 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.477338 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.477347 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.580276 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.580600 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.580697 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.580841 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.581105 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.684002 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.684040 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.684050 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.684069 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.684082 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.786824 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.786909 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.786927 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.787021 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.787040 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.830667 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.830769 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:37 crc kubenswrapper[4812]: E1125 16:48:37.830802 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.830764 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:37 crc kubenswrapper[4812]: E1125 16:48:37.830910 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:37 crc kubenswrapper[4812]: E1125 16:48:37.831093 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.890449 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.890500 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.890509 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.890527 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.890561 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.993287 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.993336 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.993345 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.993360 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:37 crc kubenswrapper[4812]: I1125 16:48:37.993370 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:37Z","lastTransitionTime":"2025-11-25T16:48:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.095847 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.095891 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.095901 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.095917 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.095929 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.197994 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.198054 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.198064 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.198082 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.198093 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.302455 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.302557 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.302568 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.302585 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.302595 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.405425 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.405487 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.405497 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.405512 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.405521 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.508121 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.508178 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.508190 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.508212 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.508226 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.611807 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.611866 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.611882 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.611911 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.611932 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.715026 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.715097 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.715114 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.715142 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.715162 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.817643 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.817723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.817736 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.817757 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.817769 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.831218 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:38 crc kubenswrapper[4812]: E1125 16:48:38.831394 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.919992 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.920021 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.920029 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.920041 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:38 crc kubenswrapper[4812]: I1125 16:48:38.920058 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:38Z","lastTransitionTime":"2025-11-25T16:48:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.022232 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.022297 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.022314 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.022341 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.022362 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.126006 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.126044 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.126054 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.126070 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.126081 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.228491 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.228550 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.228563 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.228580 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.228591 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.331387 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.331436 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.331447 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.331465 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.331479 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.433922 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.433967 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.433977 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.433994 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.434007 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.536397 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.536446 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.536457 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.536473 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.536486 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.638867 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.638914 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.638926 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.638944 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.638959 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.741059 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.741097 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.741104 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.741120 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.741129 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.831023 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.831023 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:39 crc kubenswrapper[4812]: E1125 16:48:39.831236 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.831257 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:39 crc kubenswrapper[4812]: E1125 16:48:39.831513 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:39 crc kubenswrapper[4812]: E1125 16:48:39.831746 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.843838 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.843874 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.843886 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.843901 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.843912 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.946506 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.946566 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.946583 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.946600 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:39 crc kubenswrapper[4812]: I1125 16:48:39.946609 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:39Z","lastTransitionTime":"2025-11-25T16:48:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.049344 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.049387 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.049395 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.049410 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.049420 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.152081 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.152123 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.152131 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.152143 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.152152 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.254740 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.254781 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.254791 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.254805 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.254814 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.357800 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.357848 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.357864 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.357884 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.357900 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.462688 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.462773 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.462798 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.462828 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.462861 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.565935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.566006 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.566025 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.566048 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.566065 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.669391 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.669455 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.669474 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.669499 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.669517 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.772961 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.772994 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.773003 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.773019 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.773030 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.831389 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:40 crc kubenswrapper[4812]: E1125 16:48:40.831599 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.833179 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:48:40 crc kubenswrapper[4812]: E1125 16:48:40.833523 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.877016 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.877092 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.877104 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.877127 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.877141 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.980160 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.980204 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.980212 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.980229 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:40 crc kubenswrapper[4812]: I1125 16:48:40.980240 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:40Z","lastTransitionTime":"2025-11-25T16:48:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.082374 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.082408 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.082420 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.082437 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.082457 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.184793 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.184826 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.184835 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.184851 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.184861 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.287265 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.287328 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.287347 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.287371 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.287389 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.390642 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.390745 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.390771 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.390802 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.390822 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.494187 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.494242 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.494260 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.494282 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.494298 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.596667 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.596728 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.596747 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.596773 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.596791 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.699489 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.699521 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.699558 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.699574 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.699587 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.801746 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.801817 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.801829 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.801844 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.801855 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.830722 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.830737 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:41 crc kubenswrapper[4812]: E1125 16:48:41.830993 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.830747 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:41 crc kubenswrapper[4812]: E1125 16:48:41.831324 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:41 crc kubenswrapper[4812]: E1125 16:48:41.831475 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.903414 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.903475 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.903484 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.903497 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:41 crc kubenswrapper[4812]: I1125 16:48:41.903517 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:41Z","lastTransitionTime":"2025-11-25T16:48:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.005680 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.005715 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.005723 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.005737 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.005746 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.107929 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.107963 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.107974 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.107991 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.108003 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.210254 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.210302 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.210345 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.210361 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.210371 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.312418 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.312474 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.312487 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.312502 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.312512 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.414876 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.414927 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.414935 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.414949 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.414959 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.517577 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.517608 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.517616 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.517629 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.517640 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.620153 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.620200 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.620217 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.620241 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.620258 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.722654 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.722717 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.722736 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.722760 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.722776 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.828871 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.829225 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.829241 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.829256 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.829265 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.830931 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:42 crc kubenswrapper[4812]: E1125 16:48:42.831281 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.931222 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.931287 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.931300 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.931316 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:42 crc kubenswrapper[4812]: I1125 16:48:42.931325 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:42Z","lastTransitionTime":"2025-11-25T16:48:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.034311 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.034351 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.034362 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.034379 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.034391 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.136448 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.136485 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.136496 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.136511 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.136521 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.239381 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.239421 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.239431 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.239445 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.239454 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.341767 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.341824 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.341834 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.341849 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.341858 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.444151 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.444196 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.444205 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.444217 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.444225 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.546424 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.546497 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.546512 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.546584 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.546604 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.648818 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.648859 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.648867 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.648883 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.648892 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.751428 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.751471 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.751482 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.751500 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.751511 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.831546 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.831696 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:43 crc kubenswrapper[4812]: E1125 16:48:43.831836 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.831880 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:43 crc kubenswrapper[4812]: E1125 16:48:43.832148 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:43 crc kubenswrapper[4812]: E1125 16:48:43.832295 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.853450 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.853499 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.853518 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.853611 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.853629 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.956486 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.956546 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.956557 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.956573 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:43 crc kubenswrapper[4812]: I1125 16:48:43.956587 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:43Z","lastTransitionTime":"2025-11-25T16:48:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.059435 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.059468 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.059478 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.059491 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.059501 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:44Z","lastTransitionTime":"2025-11-25T16:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.161877 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.161918 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.161932 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.161950 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.161962 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:44Z","lastTransitionTime":"2025-11-25T16:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.264359 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.264398 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.264408 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.264423 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.264432 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:44Z","lastTransitionTime":"2025-11-25T16:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.366792 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.366845 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.366854 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.366869 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.366878 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:44Z","lastTransitionTime":"2025-11-25T16:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.468985 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.469063 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.469073 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.469088 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.469098 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:44Z","lastTransitionTime":"2025-11-25T16:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.473473 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.473512 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.473521 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.473548 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.473561 4812 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T16:48:44Z","lastTransitionTime":"2025-11-25T16:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.512493 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq"] Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.512883 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.514491 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.514959 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.515278 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.515465 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.539428 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=46.53941008 podStartE2EDuration="46.53941008s" podCreationTimestamp="2025-11-25 16:47:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.527662637 +0000 UTC m=+99.367804782" watchObservedRunningTime="2025-11-25 16:48:44.53941008 +0000 UTC m=+99.379552185" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.562258 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc0a5f0d-305a-4f3e-9785-cde0f931d306-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.562328 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc0a5f0d-305a-4f3e-9785-cde0f931d306-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.562353 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dc0a5f0d-305a-4f3e-9785-cde0f931d306-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.562381 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dc0a5f0d-305a-4f3e-9785-cde0f931d306-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.562429 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dc0a5f0d-305a-4f3e-9785-cde0f931d306-service-ca\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.566567 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podStartSLOduration=77.566524004 podStartE2EDuration="1m17.566524004s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.566500813 +0000 UTC m=+99.406642908" watchObservedRunningTime="2025-11-25 16:48:44.566524004 +0000 UTC m=+99.406666099" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.594141 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-m7ndd" podStartSLOduration=77.594119571 podStartE2EDuration="1m17.594119571s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.593955197 +0000 UTC m=+99.434097312" watchObservedRunningTime="2025-11-25 16:48:44.594119571 +0000 UTC m=+99.434261666" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.594385 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gljt8" podStartSLOduration=77.594378838 podStartE2EDuration="1m17.594378838s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.582985494 +0000 UTC m=+99.423127609" watchObservedRunningTime="2025-11-25 16:48:44.594378838 +0000 UTC m=+99.434520933" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.628054 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=73.628033397 podStartE2EDuration="1m13.628033397s" podCreationTimestamp="2025-11-25 16:47:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.627899524 +0000 UTC m=+99.468041639" watchObservedRunningTime="2025-11-25 16:48:44.628033397 +0000 UTC m=+99.468175512" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663341 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc0a5f0d-305a-4f3e-9785-cde0f931d306-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663444 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dc0a5f0d-305a-4f3e-9785-cde0f931d306-etc-cvo-updatepayloads\") 
pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663478 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc0a5f0d-305a-4f3e-9785-cde0f931d306-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663546 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dc0a5f0d-305a-4f3e-9785-cde0f931d306-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663579 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dc0a5f0d-305a-4f3e-9785-cde0f931d306-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663589 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dc0a5f0d-305a-4f3e-9785-cde0f931d306-service-ca\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.663673 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dc0a5f0d-305a-4f3e-9785-cde0f931d306-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.664439 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dc0a5f0d-305a-4f3e-9785-cde0f931d306-service-ca\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.675739 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc0a5f0d-305a-4f3e-9785-cde0f931d306-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.678773 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc0a5f0d-305a-4f3e-9785-cde0f931d306-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-m4hxq\" (UID: \"dc0a5f0d-305a-4f3e-9785-cde0f931d306\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.698487 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-dgtfm" podStartSLOduration=79.698463689 podStartE2EDuration="1m19.698463689s" podCreationTimestamp="2025-11-25 16:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.689004816 +0000 UTC m=+99.529146921" watchObservedRunningTime="2025-11-25 16:48:44.698463689 +0000 UTC m=+99.538605784" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.698611 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-99qrk" podStartSLOduration=77.698604833 podStartE2EDuration="1m17.698604833s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.698348256 +0000 UTC m=+99.538490341" watchObservedRunningTime="2025-11-25 16:48:44.698604833 +0000 UTC m=+99.538746948" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.737103 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=13.737086571 podStartE2EDuration="13.737086571s" podCreationTimestamp="2025-11-25 16:48:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.736926376 +0000 UTC m=+99.577068471" watchObservedRunningTime="2025-11-25 16:48:44.737086571 +0000 UTC m=+99.577228666" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.765498 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=79.76548168 podStartE2EDuration="1m19.76548168s" podCreationTimestamp="2025-11-25 16:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.763871146 +0000 UTC m=+99.604013241" watchObservedRunningTime="2025-11-25 16:48:44.76548168 +0000 UTC m=+99.605623775" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.774398 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2fft" podStartSLOduration=76.774376627 podStartE2EDuration="1m16.774376627s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.773957586 +0000 UTC m=+99.614099691" watchObservedRunningTime="2025-11-25 16:48:44.774376627 +0000 UTC m=+99.614518722" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.797916 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=75.797902296 podStartE2EDuration="1m15.797902296s" podCreationTimestamp="2025-11-25 16:47:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:44.796396076 +0000 UTC m=+99.636538191" watchObservedRunningTime="2025-11-25 16:48:44.797902296 +0000 UTC m=+99.638044381" Nov 25 16:48:44 crc 
kubenswrapper[4812]: I1125 16:48:44.825137 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" Nov 25 16:48:44 crc kubenswrapper[4812]: I1125 16:48:44.831247 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:44 crc kubenswrapper[4812]: E1125 16:48:44.831373 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.292846 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" event={"ID":"dc0a5f0d-305a-4f3e-9785-cde0f931d306","Type":"ContainerStarted","Data":"fa07d7f13550e12045ba74df36bee1922973c69e0bccee275ad1fb96e6917fce"} Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.293191 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" event={"ID":"dc0a5f0d-305a-4f3e-9785-cde0f931d306","Type":"ContainerStarted","Data":"50010a17d3d812323d64a7752cbca173150340b2eb6b19bf65a89995dc29d1f9"} Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.308028 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-m4hxq" podStartSLOduration=78.308014692 podStartE2EDuration="1m18.308014692s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:48:45.307759166 +0000 UTC m=+100.147901321" watchObservedRunningTime="2025-11-25 16:48:45.308014692 +0000 UTC m=+100.148156787" Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.470480 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:45 crc kubenswrapper[4812]: E1125 16:48:45.470673 4812 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:48:45 crc kubenswrapper[4812]: E1125 16:48:45.470761 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs podName:fbb57832-3993-492b-80c9-a6a61891a125 nodeName:}" failed. No retries permitted until 2025-11-25 16:49:49.470743009 +0000 UTC m=+164.310885104 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs") pod "network-metrics-daemon-82fvc" (UID: "fbb57832-3993-492b-80c9-a6a61891a125") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.831473 4812 util.go:30] "No sandbox for pod can be found. 
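
The nestedpendingoperations error above shows the volume manager's exponential backoff: this metrics-certs mount has already failed enough times that the next retry is pushed 1m4s out. A sketch of that doubling follows; the 500ms initial delay and 2m cap are assumptions, not kubelet's exact constants, but seven doublings of 500ms does give the 1m4s seen in the log.

```go
package main

import (
	"fmt"
	"time"
)

// retryDelay doubles from an initial delay up to a cap. The constants
// are illustrative assumptions, chosen so that retryDelay(7) = 1m4s.
func retryDelay(failures int) time.Duration {
	d := 500 * time.Millisecond
	for i := 0; i < failures; i++ {
		d *= 2
		if d > 2*time.Minute {
			return 2 * time.Minute
		}
	}
	return d
}

func main() {
	fmt.Println(retryDelay(7)) // 1m4s
}
```
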
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.831556 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:45 crc kubenswrapper[4812]: I1125 16:48:45.831498 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:45 crc kubenswrapper[4812]: E1125 16:48:45.832751 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:45 crc kubenswrapper[4812]: E1125 16:48:45.832861 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:45 crc kubenswrapper[4812]: E1125 16:48:45.832913 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:46 crc kubenswrapper[4812]: I1125 16:48:46.830445 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:46 crc kubenswrapper[4812]: E1125 16:48:46.830620 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:47 crc kubenswrapper[4812]: I1125 16:48:47.831283 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:47 crc kubenswrapper[4812]: I1125 16:48:47.831292 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:47 crc kubenswrapper[4812]: E1125 16:48:47.831426 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:47 crc kubenswrapper[4812]: I1125 16:48:47.831501 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:47 crc kubenswrapper[4812]: E1125 16:48:47.831868 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:47 crc kubenswrapper[4812]: E1125 16:48:47.832026 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:48 crc kubenswrapper[4812]: I1125 16:48:48.831075 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:48 crc kubenswrapper[4812]: E1125 16:48:48.831330 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:49 crc kubenswrapper[4812]: I1125 16:48:49.831060 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:49 crc kubenswrapper[4812]: I1125 16:48:49.831060 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:49 crc kubenswrapper[4812]: E1125 16:48:49.831271 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:49 crc kubenswrapper[4812]: E1125 16:48:49.831367 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:49 crc kubenswrapper[4812]: I1125 16:48:49.831085 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:49 crc kubenswrapper[4812]: E1125 16:48:49.831560 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:50 crc kubenswrapper[4812]: I1125 16:48:50.830998 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:50 crc kubenswrapper[4812]: E1125 16:48:50.831153 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:51 crc kubenswrapper[4812]: I1125 16:48:51.830845 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:51 crc kubenswrapper[4812]: I1125 16:48:51.830945 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:51 crc kubenswrapper[4812]: I1125 16:48:51.831039 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:51 crc kubenswrapper[4812]: E1125 16:48:51.831046 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:51 crc kubenswrapper[4812]: E1125 16:48:51.831218 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:51 crc kubenswrapper[4812]: E1125 16:48:51.831328 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:52 crc kubenswrapper[4812]: I1125 16:48:52.830984 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:52 crc kubenswrapper[4812]: E1125 16:48:52.831104 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:53 crc kubenswrapper[4812]: I1125 16:48:53.831485 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:53 crc kubenswrapper[4812]: I1125 16:48:53.831610 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:53 crc kubenswrapper[4812]: E1125 16:48:53.831703 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:53 crc kubenswrapper[4812]: I1125 16:48:53.831761 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:53 crc kubenswrapper[4812]: E1125 16:48:53.831895 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:53 crc kubenswrapper[4812]: E1125 16:48:53.832244 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:53 crc kubenswrapper[4812]: I1125 16:48:53.832850 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:48:53 crc kubenswrapper[4812]: E1125 16:48:53.832998 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:48:54 crc kubenswrapper[4812]: I1125 16:48:54.831023 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:54 crc kubenswrapper[4812]: E1125 16:48:54.831257 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:55 crc kubenswrapper[4812]: I1125 16:48:55.830867 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:55 crc kubenswrapper[4812]: I1125 16:48:55.832095 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:55 crc kubenswrapper[4812]: I1125 16:48:55.832211 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:55 crc kubenswrapper[4812]: E1125 16:48:55.832900 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:55 crc kubenswrapper[4812]: E1125 16:48:55.832305 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:55 crc kubenswrapper[4812]: E1125 16:48:55.832945 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:56 crc kubenswrapper[4812]: I1125 16:48:56.830943 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:56 crc kubenswrapper[4812]: E1125 16:48:56.831275 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:57 crc kubenswrapper[4812]: I1125 16:48:57.831349 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:57 crc kubenswrapper[4812]: I1125 16:48:57.831436 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:57 crc kubenswrapper[4812]: E1125 16:48:57.831578 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:57 crc kubenswrapper[4812]: I1125 16:48:57.831667 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:57 crc kubenswrapper[4812]: E1125 16:48:57.831764 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:57 crc kubenswrapper[4812]: E1125 16:48:57.831875 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:48:58 crc kubenswrapper[4812]: I1125 16:48:58.831210 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:48:58 crc kubenswrapper[4812]: E1125 16:48:58.831353 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:48:59 crc kubenswrapper[4812]: I1125 16:48:59.830581 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:48:59 crc kubenswrapper[4812]: I1125 16:48:59.830812 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:48:59 crc kubenswrapper[4812]: E1125 16:48:59.830983 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:48:59 crc kubenswrapper[4812]: I1125 16:48:59.831042 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:48:59 crc kubenswrapper[4812]: E1125 16:48:59.831135 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:48:59 crc kubenswrapper[4812]: E1125 16:48:59.831221 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:00 crc kubenswrapper[4812]: I1125 16:49:00.831315 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:00 crc kubenswrapper[4812]: E1125 16:49:00.831464 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:01 crc kubenswrapper[4812]: I1125 16:49:01.831588 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:01 crc kubenswrapper[4812]: E1125 16:49:01.831741 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:01 crc kubenswrapper[4812]: I1125 16:49:01.831591 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:01 crc kubenswrapper[4812]: I1125 16:49:01.831622 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:01 crc kubenswrapper[4812]: E1125 16:49:01.831937 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:01 crc kubenswrapper[4812]: E1125 16:49:01.832029 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:02 crc kubenswrapper[4812]: I1125 16:49:02.831266 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:02 crc kubenswrapper[4812]: E1125 16:49:02.831967 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.351776 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/1.log" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.352227 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/0.log" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.352273 4812 generic.go:334] "Generic (PLEG): container finished" podID="3a156756-3629-4bed-8de0-1019226b7f04" containerID="1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3" exitCode=1 Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.352308 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerDied","Data":"1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3"} Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.352358 4812 scope.go:117] "RemoveContainer" containerID="720a9cb9ddced0be20edfe6b7dc94a928cc4a0a140791209825632388711f824" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.352824 4812 scope.go:117] "RemoveContainer" containerID="1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3" Nov 25 16:49:03 crc kubenswrapper[4812]: E1125 16:49:03.353165 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-m7ndd_openshift-multus(3a156756-3629-4bed-8de0-1019226b7f04)\"" pod="openshift-multus/multus-m7ndd" podUID="3a156756-3629-4bed-8de0-1019226b7f04" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.830758 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:03 crc kubenswrapper[4812]: E1125 16:49:03.831040 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.831045 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:03 crc kubenswrapper[4812]: E1125 16:49:03.831133 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:03 crc kubenswrapper[4812]: I1125 16:49:03.831074 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:03 crc kubenswrapper[4812]: E1125 16:49:03.831752 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:04 crc kubenswrapper[4812]: I1125 16:49:04.356884 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/1.log" Nov 25 16:49:04 crc kubenswrapper[4812]: I1125 16:49:04.830673 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:04 crc kubenswrapper[4812]: E1125 16:49:04.830851 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:04 crc kubenswrapper[4812]: I1125 16:49:04.831994 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:49:04 crc kubenswrapper[4812]: E1125 16:49:04.832324 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hwqsk_openshift-ovn-kubernetes(bc4dc9ff-11a1-4151-91f0-3ff83020b3b9)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" Nov 25 16:49:05 crc kubenswrapper[4812]: I1125 16:49:05.831298 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:05 crc kubenswrapper[4812]: I1125 16:49:05.831328 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:05 crc kubenswrapper[4812]: E1125 16:49:05.833116 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:05 crc kubenswrapper[4812]: I1125 16:49:05.833185 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:05 crc kubenswrapper[4812]: E1125 16:49:05.833357 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:05 crc kubenswrapper[4812]: E1125 16:49:05.833442 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:05 crc kubenswrapper[4812]: E1125 16:49:05.855477 4812 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 16:49:05 crc kubenswrapper[4812]: E1125 16:49:05.918141 4812 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 16:49:06 crc kubenswrapper[4812]: I1125 16:49:06.831137 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:06 crc kubenswrapper[4812]: E1125 16:49:06.831411 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:07 crc kubenswrapper[4812]: I1125 16:49:07.831609 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:07 crc kubenswrapper[4812]: E1125 16:49:07.832345 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:07 crc kubenswrapper[4812]: I1125 16:49:07.831624 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:07 crc kubenswrapper[4812]: E1125 16:49:07.832616 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:07 crc kubenswrapper[4812]: I1125 16:49:07.831619 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:07 crc kubenswrapper[4812]: E1125 16:49:07.832874 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:08 crc kubenswrapper[4812]: I1125 16:49:08.830740 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:08 crc kubenswrapper[4812]: E1125 16:49:08.830866 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:09 crc kubenswrapper[4812]: I1125 16:49:09.831290 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:09 crc kubenswrapper[4812]: I1125 16:49:09.831364 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:09 crc kubenswrapper[4812]: I1125 16:49:09.831361 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:09 crc kubenswrapper[4812]: E1125 16:49:09.831622 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:09 crc kubenswrapper[4812]: E1125 16:49:09.831784 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:09 crc kubenswrapper[4812]: E1125 16:49:09.831918 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:10 crc kubenswrapper[4812]: I1125 16:49:10.831394 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:10 crc kubenswrapper[4812]: E1125 16:49:10.831516 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:10 crc kubenswrapper[4812]: E1125 16:49:10.919292 4812 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 16:49:11 crc kubenswrapper[4812]: I1125 16:49:11.831200 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:11 crc kubenswrapper[4812]: I1125 16:49:11.831233 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:11 crc kubenswrapper[4812]: E1125 16:49:11.831399 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:11 crc kubenswrapper[4812]: E1125 16:49:11.831500 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:11 crc kubenswrapper[4812]: I1125 16:49:11.831240 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:11 crc kubenswrapper[4812]: E1125 16:49:11.831604 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:12 crc kubenswrapper[4812]: I1125 16:49:12.830887 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:12 crc kubenswrapper[4812]: E1125 16:49:12.831147 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:13 crc kubenswrapper[4812]: I1125 16:49:13.831479 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:13 crc kubenswrapper[4812]: I1125 16:49:13.831504 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:13 crc kubenswrapper[4812]: I1125 16:49:13.831496 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:13 crc kubenswrapper[4812]: E1125 16:49:13.831641 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:13 crc kubenswrapper[4812]: I1125 16:49:13.831906 4812 scope.go:117] "RemoveContainer" containerID="1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3" Nov 25 16:49:13 crc kubenswrapper[4812]: E1125 16:49:13.832038 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:13 crc kubenswrapper[4812]: E1125 16:49:13.832219 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:14 crc kubenswrapper[4812]: I1125 16:49:14.393648 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/1.log" Nov 25 16:49:14 crc kubenswrapper[4812]: I1125 16:49:14.394040 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerStarted","Data":"7b60d73868c8041ba2714a8ca55f1c992e9dc9254dd14cf1495b4db1e4dad249"} Nov 25 16:49:14 crc kubenswrapper[4812]: I1125 16:49:14.831338 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:14 crc kubenswrapper[4812]: E1125 16:49:14.831569 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:15 crc kubenswrapper[4812]: I1125 16:49:15.830694 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:15 crc kubenswrapper[4812]: I1125 16:49:15.830768 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:15 crc kubenswrapper[4812]: I1125 16:49:15.831507 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:15 crc kubenswrapper[4812]: E1125 16:49:15.831499 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:15 crc kubenswrapper[4812]: E1125 16:49:15.831622 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:15 crc kubenswrapper[4812]: E1125 16:49:15.831733 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:15 crc kubenswrapper[4812]: I1125 16:49:15.832356 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:49:15 crc kubenswrapper[4812]: E1125 16:49:15.919796 4812 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 16:49:16 crc kubenswrapper[4812]: I1125 16:49:16.405214 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/3.log" Nov 25 16:49:16 crc kubenswrapper[4812]: I1125 16:49:16.407319 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerStarted","Data":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} Nov 25 16:49:16 crc kubenswrapper[4812]: I1125 16:49:16.407839 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:49:16 crc kubenswrapper[4812]: I1125 16:49:16.435019 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podStartSLOduration=109.435001202 podStartE2EDuration="1m49.435001202s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:16.433152603 +0000 UTC m=+131.273294718" watchObservedRunningTime="2025-11-25 16:49:16.435001202 +0000 UTC m=+131.275143297" Nov 25 16:49:16 crc kubenswrapper[4812]: I1125 16:49:16.672871 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-82fvc"] Nov 25 16:49:16 crc kubenswrapper[4812]: I1125 16:49:16.673002 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:16 crc kubenswrapper[4812]: E1125 16:49:16.673101 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:17 crc kubenswrapper[4812]: I1125 16:49:17.831248 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:17 crc kubenswrapper[4812]: I1125 16:49:17.831343 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:17 crc kubenswrapper[4812]: E1125 16:49:17.831907 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:17 crc kubenswrapper[4812]: I1125 16:49:17.831526 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:17 crc kubenswrapper[4812]: E1125 16:49:17.832083 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:17 crc kubenswrapper[4812]: E1125 16:49:17.832162 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:18 crc kubenswrapper[4812]: I1125 16:49:18.830563 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:18 crc kubenswrapper[4812]: E1125 16:49:18.830796 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:19 crc kubenswrapper[4812]: I1125 16:49:19.831493 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:19 crc kubenswrapper[4812]: E1125 16:49:19.831674 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 16:49:19 crc kubenswrapper[4812]: I1125 16:49:19.831518 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:19 crc kubenswrapper[4812]: I1125 16:49:19.831754 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:19 crc kubenswrapper[4812]: E1125 16:49:19.831850 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 16:49:19 crc kubenswrapper[4812]: E1125 16:49:19.831928 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 16:49:20 crc kubenswrapper[4812]: I1125 16:49:20.830666 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:20 crc kubenswrapper[4812]: E1125 16:49:20.830823 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-82fvc" podUID="fbb57832-3993-492b-80c9-a6a61891a125" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.830699 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.830834 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.831155 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.834299 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.834356 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.834990 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 16:49:21 crc kubenswrapper[4812]: I1125 16:49:21.835617 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 16:49:22 crc kubenswrapper[4812]: I1125 16:49:22.830965 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc" Nov 25 16:49:22 crc kubenswrapper[4812]: I1125 16:49:22.834321 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 16:49:22 crc kubenswrapper[4812]: I1125 16:49:22.836991 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.250872 4812 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.295383 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.296110 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.296915 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gvnqg"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.297249 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.298724 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.298968 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.299230 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.299746 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.300060 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.300075 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.300304 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.301491 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.301998 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.302441 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.303371 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.303501 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.303947 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.303990 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.308574 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-kfwch"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.309123 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310210 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310347 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310366 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310410 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310675 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310689 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310698 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310920 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310969 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.311041 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.310937 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.311127 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 16:49:25 crc 
kubenswrapper[4812]: I1125 16:49:25.311437 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.312069 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.312159 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.312254 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.314187 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fjv5r"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.314714 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9t6bj"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.315065 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.315490 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.316386 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.317879 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.318368 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.318574 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.317890 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.318880 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.317983 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.321595 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.322312 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.322497 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-dd95m"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.322858 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.323664 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.329131 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.329449 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.329662 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.329963 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.330130 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.330315 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.330466 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.330686 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.333570 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.333766 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-64j56"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.334513 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.337817 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338090 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338180 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338249 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338398 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338508 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338697 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.338996 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339177 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339301 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339338 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339502 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339584 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339706 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.339814 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.340933 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.341137 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.341274 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 
16:49:25.341442 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.341447 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.341837 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-nj6w8"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.342665 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.343262 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-pxgkd"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.343935 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.355929 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.356510 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.356960 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.356996 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.357013 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.357661 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.358065 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.369838 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.370455 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.371854 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.372653 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.373032 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.373376 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.373463 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 16:49:25 
crc kubenswrapper[4812]: I1125 16:49:25.373636 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.373871 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.374914 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-td2bc"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.376591 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.379571 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.380284 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.381076 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4chwd"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.381906 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.382438 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.383083 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.384399 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.384895 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.388102 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.398391 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.399002 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-splm8"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.399212 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.399634 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.401704 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.402206 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.402462 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.402571 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.404973 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.405680 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.406911 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.412835 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.413593 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.414965 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415001 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-oauth-config\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415024 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415050 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-config\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 
25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415071 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415090 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415109 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-stats-auth\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415131 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415162 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcmfj\" (UniqueName: \"kubernetes.io/projected/3cc2b341-1b08-45ca-970f-b64350fbe88e-kube-api-access-tcmfj\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415185 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415205 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-client-ca\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415226 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/464e93da-ad92-4165-b1c2-6cede11ac006-available-featuregates\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 
16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415248 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-default-certificate\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415271 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-auth-proxy-config\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415293 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-oauth-serving-cert\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415314 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415334 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-audit-policies\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415353 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-policies\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415377 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-service-ca\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415397 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bmmn\" (UniqueName: \"kubernetes.io/projected/8bad98aa-94be-4024-8cb5-dc6078ffec1f-kube-api-access-4bmmn\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415419 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4qb5\" (UniqueName: \"kubernetes.io/projected/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-kube-api-access-b4qb5\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415442 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-machine-approver-tls\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415457 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415474 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415494 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415511 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-config\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415548 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxzxx\" (UniqueName: \"kubernetes.io/projected/4662d425-aec1-4e58-845b-36ae7574da7a-kube-api-access-kxzxx\") pod \"dns-operator-744455d44c-td2bc\" (UID: \"4662d425-aec1-4e58-845b-36ae7574da7a\") " pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415564 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-service-ca-bundle\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415599 4812 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415616 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cc2b341-1b08-45ca-970f-b64350fbe88e-config\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415631 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/464e93da-ad92-4165-b1c2-6cede11ac006-serving-cert\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415646 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-encryption-config\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415661 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfw2f\" (UniqueName: \"kubernetes.io/projected/135cae13-5b75-4d98-9c17-61448faddf90-kube-api-access-mfw2f\") pod \"downloads-7954f5f757-dd95m\" (UID: \"135cae13-5b75-4d98-9c17-61448faddf90\") " pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415674 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-dir\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415694 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415713 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wzj5\" (UniqueName: \"kubernetes.io/projected/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-kube-api-access-2wzj5\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415739 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m78w5\" (UniqueName: \"kubernetes.io/projected/01a09641-0222-4bd8-af33-bf92edcc229c-kube-api-access-m78w5\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415753 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-serving-cert\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415768 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4662d425-aec1-4e58-845b-36ae7574da7a-metrics-tls\") pod \"dns-operator-744455d44c-td2bc\" (UID: \"4662d425-aec1-4e58-845b-36ae7574da7a\") " pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415784 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01a09641-0222-4bd8-af33-bf92edcc229c-serving-cert\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415800 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlfps\" (UniqueName: \"kubernetes.io/projected/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-kube-api-access-tlfps\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415815 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5gk8\" (UniqueName: \"kubernetes.io/projected/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-kube-api-access-r5gk8\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415832 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415847 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkd8c\" (UniqueName: \"kubernetes.io/projected/464e93da-ad92-4165-b1c2-6cede11ac006-kube-api-access-rkd8c\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415864 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-metrics-certs\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415879 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-trusted-ca-bundle\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415893 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/46c765da-7def-4c6e-8ac2-8da853bbb378-audit-dir\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415907 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-service-ca-bundle\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415921 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-serving-cert\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415939 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6jfx\" (UniqueName: \"kubernetes.io/projected/b887db51-86fd-44fb-b146-21b546ae5345-kube-api-access-x6jfx\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415954 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415971 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgstc\" (UniqueName: \"kubernetes.io/projected/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-kube-api-access-kgstc\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.415987 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416002 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-client\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416017 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b887db51-86fd-44fb-b146-21b546ae5345-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416037 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b887db51-86fd-44fb-b146-21b546ae5345-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416054 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6npqx\" (UniqueName: \"kubernetes.io/projected/bf199acf-8845-4841-b653-d3f4b704f224-kube-api-access-6npqx\") pod \"cluster-samples-operator-665b6dd947-vntd5\" (UID: \"bf199acf-8845-4841-b653-d3f4b704f224\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416090 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thv4n\" (UniqueName: \"kubernetes.io/projected/46c765da-7def-4c6e-8ac2-8da853bbb378-kube-api-access-thv4n\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416222 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-config\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416252 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416369 4812 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3cc2b341-1b08-45ca-970f-b64350fbe88e-images\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416417 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3cc2b341-1b08-45ca-970f-b64350fbe88e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416465 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b887db51-86fd-44fb-b146-21b546ae5345-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416488 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-config\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416508 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416555 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf199acf-8845-4841-b653-d3f4b704f224-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vntd5\" (UID: \"bf199acf-8845-4841-b653-d3f4b704f224\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.416590 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-serving-cert\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.417657 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.417898 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.418154 4812 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.418428 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.418923 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419293 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419593 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419879 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419894 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419789 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419839 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419845 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.419780 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.423327 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.430571 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.431659 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.441546 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wmngc"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.442156 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.445905 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.453814 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.455197 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.456007 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.457500 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.457627 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.458572 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.459744 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.467515 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-fc6mw"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.468347 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.469160 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-h6c4h"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.470068 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.470354 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.471297 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.480908 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.489084 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.489901 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.490292 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.490731 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.491421 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.491594 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.494028 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v7lp8"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.495712 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.496005 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.496414 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.497015 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.501005 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.501797 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.502136 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.502762 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.502861 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.502916 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.503063 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.503240 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.505827 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-brj9b"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.506269 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.507478 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-x9d4d"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.508823 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.509309 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.511287 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.512891 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.515820 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9t6bj"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.517777 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-m57d7"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.517873 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqjc9\" (UniqueName: \"kubernetes.io/projected/a1348b83-535e-4211-891a-d234f9e9c4ec-kube-api-access-dqjc9\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.517912 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlfps\" (UniqueName: \"kubernetes.io/projected/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-kube-api-access-tlfps\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.517945 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5gk8\" (UniqueName: \"kubernetes.io/projected/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-kube-api-access-r5gk8\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.517970 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.517994 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: 
\"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518020 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkd8c\" (UniqueName: \"kubernetes.io/projected/464e93da-ad92-4165-b1c2-6cede11ac006-kube-api-access-rkd8c\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518042 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhrj8\" (UniqueName: \"kubernetes.io/projected/aeecd090-6524-4d8e-a0eb-e785b78f99c2-kube-api-access-lhrj8\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518063 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-metrics-certs\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518083 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-trusted-ca-bundle\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518112 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-config\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518132 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-service-ca\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518155 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/46c765da-7def-4c6e-8ac2-8da853bbb378-audit-dir\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518172 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-service-ca-bundle\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518200 4812 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-serving-cert\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518217 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6jfx\" (UniqueName: \"kubernetes.io/projected/b887db51-86fd-44fb-b146-21b546ae5345-kube-api-access-x6jfx\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518240 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518264 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgstc\" (UniqueName: \"kubernetes.io/projected/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-kube-api-access-kgstc\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518281 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518296 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34cd8aa7-5566-4824-8c72-10438437ef94-config\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518317 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-client\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518364 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b887db51-86fd-44fb-b146-21b546ae5345-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518386 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02792ddc-034c-4fa3-8e9b-bde721cb94e5-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518403 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-encryption-config\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518422 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfsk7\" (UniqueName: \"kubernetes.io/projected/f030671f-5121-4cb4-8163-5c65444c1896-kube-api-access-bfsk7\") pod \"multus-admission-controller-857f4d67dd-fc6mw\" (UID: \"f030671f-5121-4cb4-8163-5c65444c1896\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518449 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-ca\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518467 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b01e1d1-14fa-4594-b52e-07b377965f5e-metrics-tls\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518486 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b887db51-86fd-44fb-b146-21b546ae5345-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518502 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6npqx\" (UniqueName: \"kubernetes.io/projected/bf199acf-8845-4841-b653-d3f4b704f224-kube-api-access-6npqx\") pod \"cluster-samples-operator-665b6dd947-vntd5\" (UID: \"bf199acf-8845-4841-b653-d3f4b704f224\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518644 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-proxy-tls\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518678 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thv4n\" (UniqueName: 
\"kubernetes.io/projected/46c765da-7def-4c6e-8ac2-8da853bbb378-kube-api-access-thv4n\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518698 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-config\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518715 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518733 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e9cf419e-cde9-4a86-a206-24ac78d1e475-images\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518747 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3b01e1d1-14fa-4594-b52e-07b377965f5e-bound-sa-token\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518767 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3cc2b341-1b08-45ca-970f-b64350fbe88e-images\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518784 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3cc2b341-1b08-45ca-970f-b64350fbe88e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518801 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/34cd8aa7-5566-4824-8c72-10438437ef94-trusted-ca\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518811 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-m57d7" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518820 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b887db51-86fd-44fb-b146-21b546ae5345-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518838 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-config\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518855 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518874 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e67da4-caf9-4204-94a3-22f4e562a827-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518890 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/34cd8aa7-5566-4824-8c72-10438437ef94-serving-cert\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518907 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02792ddc-034c-4fa3-8e9b-bde721cb94e5-config\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518926 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/f93960d1-f97a-4d2d-9e04-edb082755a9a-node-pullsecrets\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518944 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf199acf-8845-4841-b653-d3f4b704f224-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vntd5\" (UID: \"bf199acf-8845-4841-b653-d3f4b704f224\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" 
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518960 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518976 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1348b83-535e-4211-891a-d234f9e9c4ec-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.518992 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhl27\" (UniqueName: \"kubernetes.io/projected/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-kube-api-access-jhl27\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519016 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-serving-cert\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519042 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519062 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6qds\" (UniqueName: \"kubernetes.io/projected/e9cf419e-cde9-4a86-a206-24ac78d1e475-kube-api-access-p6qds\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519080 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0654c4-3371-4451-938f-803e6f1ffa69-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519095 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-oauth-config\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " 
pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519113 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519131 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45k9c\" (UniqueName: \"kubernetes.io/projected/34cd8aa7-5566-4824-8c72-10438437ef94-kube-api-access-45k9c\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519146 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aeecd090-6524-4d8e-a0eb-e785b78f99c2-serving-cert\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519162 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02792ddc-034c-4fa3-8e9b-bde721cb94e5-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519179 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-etcd-serving-ca\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519197 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-config\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519215 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519231 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519264 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519280 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcmfj\" (UniqueName: \"kubernetes.io/projected/3cc2b341-1b08-45ca-970f-b64350fbe88e-kube-api-access-tcmfj\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519297 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z58d\" (UniqueName: \"kubernetes.io/projected/f7e67da4-caf9-4204-94a3-22f4e562a827-kube-api-access-8z58d\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519313 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-image-import-ca\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519331 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519346 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-client-ca\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519363 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/464e93da-ad92-4165-b1c2-6cede11ac006-available-featuregates\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519385 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-default-certificate\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519406 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-auth-proxy-config\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519426 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-oauth-serving-cert\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519446 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519461 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-audit\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519476 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-audit-policies\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519491 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-policies\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519508 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-service-ca\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519523 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bmmn\" (UniqueName: \"kubernetes.io/projected/8bad98aa-94be-4024-8cb5-dc6078ffec1f-kube-api-access-4bmmn\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519559 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f0654c4-3371-4451-938f-803e6f1ffa69-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519578 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4qb5\" (UniqueName: \"kubernetes.io/projected/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-kube-api-access-b4qb5\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519602 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-machine-approver-tls\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519619 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519635 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519651 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4546r\" (UniqueName: \"kubernetes.io/projected/f93960d1-f97a-4d2d-9e04-edb082755a9a-kube-api-access-4546r\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519659 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519668 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1348b83-535e-4211-891a-d234f9e9c4ec-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b"
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519720 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519744 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-config\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519762 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxzxx\" (UniqueName: \"kubernetes.io/projected/4662d425-aec1-4e58-845b-36ae7574da7a-kube-api-access-kxzxx\") pod \"dns-operator-744455d44c-td2bc\" (UID: \"4662d425-aec1-4e58-845b-36ae7574da7a\") " pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519783 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-service-ca-bundle\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519804 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f030671f-5121-4cb4-8163-5c65444c1896-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-fc6mw\" (UID: \"f030671f-5121-4cb4-8163-5c65444c1896\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519826 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519845 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndns5\" (UniqueName: \"kubernetes.io/projected/3b01e1d1-14fa-4594-b52e-07b377965f5e-kube-api-access-ndns5\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519868 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cc2b341-1b08-45ca-970f-b64350fbe88e-config\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519885 4812 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-serving-cert\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519903 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/464e93da-ad92-4165-b1c2-6cede11ac006-serving-cert\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519921 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-encryption-config\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519938 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfw2f\" (UniqueName: \"kubernetes.io/projected/135cae13-5b75-4d98-9c17-61448faddf90-kube-api-access-mfw2f\") pod \"downloads-7954f5f757-dd95m\" (UID: \"135cae13-5b75-4d98-9c17-61448faddf90\") " pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519955 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-dir\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519977 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e9cf419e-cde9-4a86-a206-24ac78d1e475-proxy-tls\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.519991 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f93960d1-f97a-4d2d-9e04-edb082755a9a-audit-dir\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520012 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520029 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e67da4-caf9-4204-94a3-22f4e562a827-config\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520046 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wzj5\" (UniqueName: \"kubernetes.io/projected/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-kube-api-access-2wzj5\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520062 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-client\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520090 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m78w5\" (UniqueName: \"kubernetes.io/projected/01a09641-0222-4bd8-af33-bf92edcc229c-kube-api-access-m78w5\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520108 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e9cf419e-cde9-4a86-a206-24ac78d1e475-auth-proxy-config\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520123 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-etcd-client\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520139 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-serving-cert\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520155 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4662d425-aec1-4e58-845b-36ae7574da7a-metrics-tls\") pod \"dns-operator-744455d44c-td2bc\" (UID: \"4662d425-aec1-4e58-845b-36ae7574da7a\") " pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520173 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-config\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc 
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520205 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3b01e1d1-14fa-4594-b52e-07b377965f5e-trusted-ca\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520220 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f0654c4-3371-4451-938f-803e6f1ffa69-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.520268 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.521889 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.522825 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-config\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.523427 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-service-ca-bundle\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.526142 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fjv5r"]
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.527222 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-metrics-certs\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.528156 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-trusted-ca-bundle\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.528283 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/46c765da-7def-4c6e-8ac2-8da853bbb378-audit-dir\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.528594 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.528836 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-service-ca-bundle\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.529462 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cc2b341-1b08-45ca-970f-b64350fbe88e-config\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.531021 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-config\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.531632 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/3cc2b341-1b08-45ca-970f-b64350fbe88e-images\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.535567 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.535570 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-dir\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.537956 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.538247 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-encryption-config\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.539203 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.539230 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.539707 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-stats-auth\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.539881 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.540378 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-config\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.540492 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/464e93da-ad92-4165-b1c2-6cede11ac006-available-featuregates\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56"
Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.540790 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-client\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"
\"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-etcd-client\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.541227 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-serving-cert\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.541402 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-oauth-config\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.542065 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gvnqg"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.542955 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-oauth-serving-cert\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.543123 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b887db51-86fd-44fb-b146-21b546ae5345-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.543650 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.544276 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/46c765da-7def-4c6e-8ac2-8da853bbb378-audit-policies\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.544805 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-policies\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.545043 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-client-ca\") pod 
\"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.545024 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-auth-proxy-config\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.545124 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b887db51-86fd-44fb-b146-21b546ae5345-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.545459 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.545673 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/464e93da-ad92-4165-b1c2-6cede11ac006-serving-cert\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.546503 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/3cc2b341-1b08-45ca-970f-b64350fbe88e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.546718 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.547487 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.547519 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-service-ca\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.548410 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.548724 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-dd95m"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.549025 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.551169 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-serving-cert\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.551618 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4662d425-aec1-4e58-845b-36ae7574da7a-metrics-tls\") pod \"dns-operator-744455d44c-td2bc\" (UID: \"4662d425-aec1-4e58-845b-36ae7574da7a\") " pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.556429 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.557895 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.558724 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-td2bc"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.560311 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.562448 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.562813 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:25 crc 
kubenswrapper[4812]: I1125 16:49:25.565913 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46c765da-7def-4c6e-8ac2-8da853bbb378-serving-cert\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.567806 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-machine-approver-tls\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.568382 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01a09641-0222-4bd8-af33-bf92edcc229c-serving-cert\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.568460 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf199acf-8845-4841-b653-d3f4b704f224-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-vntd5\" (UID: \"bf199acf-8845-4841-b653-d3f4b704f224\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.569899 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-default-certificate\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.570871 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.573545 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.575550 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-h6c4h"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.576880 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.577967 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-splm8"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.578919 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.579233 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.581003 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-config-operator/openshift-config-operator-7777fb866f-64j56"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.583072 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-nj6w8"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.584494 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-fc6mw"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.585935 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-t7x6w"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.587098 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.587945 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-kfwch"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.589761 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-xv59p"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.590433 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.591253 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.593277 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.595115 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.596563 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.597663 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v7lp8"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.598460 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.598746 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wmngc"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.599852 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4chwd"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.600992 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.602477 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.603653 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.604695 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.605757 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.606832 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.608328 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-x9d4d"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.610058 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.611345 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-xv59p"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.612639 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-t7x6w"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.613941 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-m57d7"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.615956 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.617704 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-brj9b"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.618338 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.619166 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-bwjqq"] Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.620038 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621018 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e9cf419e-cde9-4a86-a206-24ac78d1e475-proxy-tls\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621058 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f93960d1-f97a-4d2d-9e04-edb082755a9a-audit-dir\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621080 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e67da4-caf9-4204-94a3-22f4e562a827-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621108 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-client\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621140 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e9cf419e-cde9-4a86-a206-24ac78d1e475-auth-proxy-config\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621159 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-etcd-client\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621178 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-config\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621285 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f93960d1-f97a-4d2d-9e04-edb082755a9a-audit-dir\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621332 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/3b01e1d1-14fa-4594-b52e-07b377965f5e-trusted-ca\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621467 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f0654c4-3371-4451-938f-803e6f1ffa69-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621562 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqjc9\" (UniqueName: \"kubernetes.io/projected/a1348b83-535e-4211-891a-d234f9e9c4ec-kube-api-access-dqjc9\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621637 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621698 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhrj8\" (UniqueName: \"kubernetes.io/projected/aeecd090-6524-4d8e-a0eb-e785b78f99c2-kube-api-access-lhrj8\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621734 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-config\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621759 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-service-ca\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621823 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34cd8aa7-5566-4824-8c72-10438437ef94-config\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621866 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02792ddc-034c-4fa3-8e9b-bde721cb94e5-kube-api-access\") pod 
\"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621889 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e9cf419e-cde9-4a86-a206-24ac78d1e475-auth-proxy-config\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621896 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-encryption-config\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.621967 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfsk7\" (UniqueName: \"kubernetes.io/projected/f030671f-5121-4cb4-8163-5c65444c1896-kube-api-access-bfsk7\") pod \"multus-admission-controller-857f4d67dd-fc6mw\" (UID: \"f030671f-5121-4cb4-8163-5c65444c1896\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622009 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-ca\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622027 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b01e1d1-14fa-4594-b52e-07b377965f5e-metrics-tls\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622057 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-proxy-tls\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622088 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e9cf419e-cde9-4a86-a206-24ac78d1e475-images\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622106 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3b01e1d1-14fa-4594-b52e-07b377965f5e-bound-sa-token\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: 
I1125 16:49:25.622129 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/34cd8aa7-5566-4824-8c72-10438437ef94-trusted-ca\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622158 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e67da4-caf9-4204-94a3-22f4e562a827-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622259 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/34cd8aa7-5566-4824-8c72-10438437ef94-serving-cert\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622290 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02792ddc-034c-4fa3-8e9b-bde721cb94e5-config\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622319 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/f93960d1-f97a-4d2d-9e04-edb082755a9a-node-pullsecrets\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622343 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622366 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1348b83-535e-4211-891a-d234f9e9c4ec-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622412 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhl27\" (UniqueName: \"kubernetes.io/projected/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-kube-api-access-jhl27\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622471 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-p6qds\" (UniqueName: \"kubernetes.io/projected/e9cf419e-cde9-4a86-a206-24ac78d1e475-kube-api-access-p6qds\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0654c4-3371-4451-938f-803e6f1ffa69-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622553 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45k9c\" (UniqueName: \"kubernetes.io/projected/34cd8aa7-5566-4824-8c72-10438437ef94-kube-api-access-45k9c\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622577 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aeecd090-6524-4d8e-a0eb-e785b78f99c2-serving-cert\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622573 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/f93960d1-f97a-4d2d-9e04-edb082755a9a-node-pullsecrets\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622608 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02792ddc-034c-4fa3-8e9b-bde721cb94e5-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622636 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-etcd-serving-ca\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622675 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z58d\" (UniqueName: \"kubernetes.io/projected/f7e67da4-caf9-4204-94a3-22f4e562a827-kube-api-access-8z58d\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622698 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-image-import-ca\") pod 
\"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622729 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-audit\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622764 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f0654c4-3371-4451-938f-803e6f1ffa69-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622806 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622813 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4546r\" (UniqueName: \"kubernetes.io/projected/f93960d1-f97a-4d2d-9e04-edb082755a9a-kube-api-access-4546r\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622872 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1348b83-535e-4211-891a-d234f9e9c4ec-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622906 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f030671f-5121-4cb4-8163-5c65444c1896-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-fc6mw\" (UID: \"f030671f-5121-4cb4-8163-5c65444c1896\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622943 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndns5\" (UniqueName: \"kubernetes.io/projected/3b01e1d1-14fa-4594-b52e-07b377965f5e-kube-api-access-ndns5\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.622969 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-serving-cert\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc 
kubenswrapper[4812]: I1125 16:49:25.623931 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e9cf419e-cde9-4a86-a206-24ac78d1e475-images\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.624431 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.624476 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02792ddc-034c-4fa3-8e9b-bde721cb94e5-config\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.624519 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-etcd-serving-ca\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.624876 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-image-import-ca\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.624961 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-audit\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.626196 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-etcd-client\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.626701 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e9cf419e-cde9-4a86-a206-24ac78d1e475-proxy-tls\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.626743 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/34cd8aa7-5566-4824-8c72-10438437ef94-serving-cert\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 
16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.627305 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/02792ddc-034c-4fa3-8e9b-bde721cb94e5-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.628164 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-encryption-config\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.630030 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f93960d1-f97a-4d2d-9e04-edb082755a9a-serving-cert\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.639373 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.643502 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34cd8aa7-5566-4824-8c72-10438437ef94-config\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.647313 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-config\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.647690 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f93960d1-f97a-4d2d-9e04-edb082755a9a-config\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.663470 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.665262 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/34cd8aa7-5566-4824-8c72-10438437ef94-trusted-ca\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.678631 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.699826 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 
16:49:25.718657 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.727457 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b01e1d1-14fa-4594-b52e-07b377965f5e-metrics-tls\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.745644 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.753519 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3b01e1d1-14fa-4594-b52e-07b377965f5e-trusted-ca\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.759424 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.779301 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.800077 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.819234 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.822071 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7e67da4-caf9-4204-94a3-22f4e562a827-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.838796 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.860694 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.865754 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7e67da4-caf9-4204-94a3-22f4e562a827-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.866319 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.878299 4812 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.883403 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-service-ca\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.899431 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.918732 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.938572 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.959185 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.963923 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-config\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.979323 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.987901 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aeecd090-6524-4d8e-a0eb-e785b78f99c2-serving-cert\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:25 crc kubenswrapper[4812]: I1125 16:49:25.999353 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.004101 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-ca\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.018039 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.026920 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/aeecd090-6524-4d8e-a0eb-e785b78f99c2-etcd-client\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.039198 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.059102 4812 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.079736 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.087660 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1348b83-535e-4211-891a-d234f9e9c4ec-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.098828 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.105182 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1348b83-535e-4211-891a-d234f9e9c4ec-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.118837 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.139452 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.159491 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.179583 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.185365 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f0654c4-3371-4451-938f-803e6f1ffa69-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.199048 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.205677 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0654c4-3371-4451-938f-803e6f1ffa69-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.218960 4812 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.239292 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.248930 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f030671f-5121-4cb4-8163-5c65444c1896-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-fc6mw\" (UID: \"f030671f-5121-4cb4-8163-5c65444c1896\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.259585 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.280374 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.299314 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.319572 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.326480 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-proxy-tls\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.338619 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.379499 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.398455 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.419128 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.439500 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.459742 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.479618 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.496524 4812 request.go:700] Waited for 1.002738037s due to client-side throttling, not priority and fairness, request: 
GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dcollect-profiles-dockercfg-kzf4t&limit=500&resourceVersion=0 Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.498800 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.518649 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.539251 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.558821 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.579305 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.607375 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.619453 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.639783 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.659464 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.679373 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.698945 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.720217 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.739870 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.758753 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.778812 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.799374 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.820282 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 16:49:26 crc 
kubenswrapper[4812]: I1125 16:49:26.839323 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.858785 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.879200 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.899549 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.919571 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.939706 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.960098 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.980342 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 16:49:26 crc kubenswrapper[4812]: I1125 16:49:26.998733 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.018941 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.039254 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.058316 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.079236 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.098706 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.132568 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlfps\" (UniqueName: \"kubernetes.io/projected/c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8-kube-api-access-tlfps\") pod \"machine-approver-56656f9798-hshsk\" (UID: \"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.154267 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5gk8\" (UniqueName: \"kubernetes.io/projected/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-kube-api-access-r5gk8\") pod \"console-f9d7485db-nj6w8\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.158595 4812 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.179961 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.199341 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.218491 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.254140 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.257766 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkd8c\" (UniqueName: \"kubernetes.io/projected/464e93da-ad92-4165-b1c2-6cede11ac006-kube-api-access-rkd8c\") pod \"openshift-config-operator-7777fb866f-64j56\" (UID: \"464e93da-ad92-4165-b1c2-6cede11ac006\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.276284 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxzxx\" (UniqueName: \"kubernetes.io/projected/4662d425-aec1-4e58-845b-36ae7574da7a-kube-api-access-kxzxx\") pod \"dns-operator-744455d44c-td2bc\" (UID: \"4662d425-aec1-4e58-845b-36ae7574da7a\") " pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.283449 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.294606 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.295797 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6jfx\" (UniqueName: \"kubernetes.io/projected/b887db51-86fd-44fb-b146-21b546ae5345-kube-api-access-x6jfx\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.319408 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.321028 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b887db51-86fd-44fb-b146-21b546ae5345-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-2f9nk\" (UID: \"b887db51-86fd-44fb-b146-21b546ae5345\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.334601 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgstc\" (UniqueName: \"kubernetes.io/projected/6a4b74ae-6e26-4983-b8aa-08f4a9935aca-kube-api-access-kgstc\") pod \"authentication-operator-69f744f599-gvnqg\" (UID: \"6a4b74ae-6e26-4983-b8aa-08f4a9935aca\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.356841 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfw2f\" (UniqueName: \"kubernetes.io/projected/135cae13-5b75-4d98-9c17-61448faddf90-kube-api-access-mfw2f\") pod \"downloads-7954f5f757-dd95m\" (UID: \"135cae13-5b75-4d98-9c17-61448faddf90\") " pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.375681 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wzj5\" (UniqueName: \"kubernetes.io/projected/64f34f9a-3fdc-492d-a75f-93e4a3a8727f-kube-api-access-2wzj5\") pod \"router-default-5444994796-pxgkd\" (UID: \"64f34f9a-3fdc-492d-a75f-93e4a3a8727f\") " pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.398456 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcmfj\" (UniqueName: \"kubernetes.io/projected/3cc2b341-1b08-45ca-970f-b64350fbe88e-kube-api-access-tcmfj\") pod \"machine-api-operator-5694c8668f-kfwch\" (UID: \"3cc2b341-1b08-45ca-970f-b64350fbe88e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.417690 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m78w5\" (UniqueName: \"kubernetes.io/projected/01a09641-0222-4bd8-af33-bf92edcc229c-kube-api-access-m78w5\") pod \"controller-manager-879f6c89f-9t6bj\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.429082 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.434286 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bmmn\" (UniqueName: \"kubernetes.io/projected/8bad98aa-94be-4024-8cb5-dc6078ffec1f-kube-api-access-4bmmn\") pod \"oauth-openshift-558db77b4-fjv5r\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.440113 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.461389 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6npqx\" (UniqueName: \"kubernetes.io/projected/bf199acf-8845-4841-b653-d3f4b704f224-kube-api-access-6npqx\") pod \"cluster-samples-operator-665b6dd947-vntd5\" (UID: \"bf199acf-8845-4841-b653-d3f4b704f224\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.476669 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" event={"ID":"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8","Type":"ContainerStarted","Data":"c80ffe727e9abb556359fd8b788c40bb056c58d034d68fef23b75c5a0ec11351"} Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.484610 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thv4n\" (UniqueName: \"kubernetes.io/projected/46c765da-7def-4c6e-8ac2-8da853bbb378-kube-api-access-thv4n\") pod \"apiserver-7bbb656c7d-rqlx6\" (UID: \"46c765da-7def-4c6e-8ac2-8da853bbb378\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.486464 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.496644 4812 request.go:700] Waited for 1.933453881s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-dd95m Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.500618 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4qb5\" (UniqueName: \"kubernetes.io/projected/3c9ec92e-e01e-4a37-891a-ddc2293f0ced-kube-api-access-b4qb5\") pod \"openshift-apiserver-operator-796bbdcf4f-nbb7l\" (UID: \"3c9ec92e-e01e-4a37-891a-ddc2293f0ced\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.506307 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.513222 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.518992 4812 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.535264 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.544597 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.558295 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.572885 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.582703 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.586102 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-64j56"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.601415 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.608797 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:27 crc kubenswrapper[4812]: W1125 16:49:27.614374 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod464e93da_ad92_4165_b1c2_6cede11ac006.slice/crio-8f8fc11ac54919caf16b5bbb55345be54bb9b893847016fa41a807eb686f5f8c WatchSource:0}: Error finding container 8f8fc11ac54919caf16b5bbb55345be54bb9b893847016fa41a807eb686f5f8c: Status 404 returned error can't find the container with id 8f8fc11ac54919caf16b5bbb55345be54bb9b893847016fa41a807eb686f5f8c Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.621851 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.641301 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.641334 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-td2bc"] Nov 25 16:49:27 crc kubenswrapper[4812]: W1125 16:49:27.655472 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64f34f9a_3fdc_492d_a75f_93e4a3a8727f.slice/crio-627bd39bb0398dd9808a42e1896e355b97d9fa9bd6762b1ed987c8786978ea55 WatchSource:0}: Error finding container 627bd39bb0398dd9808a42e1896e355b97d9fa9bd6762b1ed987c8786978ea55: Status 404 returned error can't find the container with id 627bd39bb0398dd9808a42e1896e355b97d9fa9bd6762b1ed987c8786978ea55 Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.658385 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.678865 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.710347 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.718411 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqjc9\" (UniqueName: \"kubernetes.io/projected/a1348b83-535e-4211-891a-d234f9e9c4ec-kube-api-access-dqjc9\") pod \"kube-storage-version-migrator-operator-b67b599dd-dfp6b\" (UID: \"a1348b83-535e-4211-891a-d234f9e9c4ec\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.740446 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhrj8\" (UniqueName: \"kubernetes.io/projected/aeecd090-6524-4d8e-a0eb-e785b78f99c2-kube-api-access-lhrj8\") pod \"etcd-operator-b45778765-wmngc\" (UID: \"aeecd090-6524-4d8e-a0eb-e785b78f99c2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.746955 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.759322 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.760007 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-nj6w8"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.770584 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/02792ddc-034c-4fa3-8e9b-bde721cb94e5-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-r74vx\" (UID: \"02792ddc-034c-4fa3-8e9b-bde721cb94e5\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.777458 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3b01e1d1-14fa-4594-b52e-07b377965f5e-bound-sa-token\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.800134 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfsk7\" (UniqueName: \"kubernetes.io/projected/f030671f-5121-4cb4-8163-5c65444c1896-kube-api-access-bfsk7\") pod \"multus-admission-controller-857f4d67dd-fc6mw\" (UID: \"f030671f-5121-4cb4-8163-5c65444c1896\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.813999 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4546r\" (UniqueName: \"kubernetes.io/projected/f93960d1-f97a-4d2d-9e04-edb082755a9a-kube-api-access-4546r\") pod \"apiserver-76f77b778f-4chwd\" (UID: \"f93960d1-f97a-4d2d-9e04-edb082755a9a\") " pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.838732 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-kfwch"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.840523 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6qds\" (UniqueName: \"kubernetes.io/projected/e9cf419e-cde9-4a86-a206-24ac78d1e475-kube-api-access-p6qds\") pod \"machine-config-operator-74547568cd-5gv9p\" (UID: \"e9cf419e-cde9-4a86-a206-24ac78d1e475\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.857376 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhl27\" (UniqueName: \"kubernetes.io/projected/13085b2e-8c8e-4023-b86b-99dbfe7c7b5f-kube-api-access-jhl27\") pod \"machine-config-controller-84d6567774-w5fdp\" (UID: \"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:27 crc kubenswrapper[4812]: W1125 16:49:27.863210 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cc2b341_1b08_45ca_970f_b64350fbe88e.slice/crio-c0b21b59171e2d815de7a5b41cc91da825601372cb558f46df8dcac4851d5adb WatchSource:0}: Error finding container c0b21b59171e2d815de7a5b41cc91da825601372cb558f46df8dcac4851d5adb: Status 404 returned error can't find the container with id c0b21b59171e2d815de7a5b41cc91da825601372cb558f46df8dcac4851d5adb Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.883266 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndns5\" (UniqueName: \"kubernetes.io/projected/3b01e1d1-14fa-4594-b52e-07b377965f5e-kube-api-access-ndns5\") pod \"ingress-operator-5b745b69d9-5hrtq\" (UID: \"3b01e1d1-14fa-4594-b52e-07b377965f5e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.899385 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45k9c\" (UniqueName: \"kubernetes.io/projected/34cd8aa7-5566-4824-8c72-10438437ef94-kube-api-access-45k9c\") pod \"console-operator-58897d9998-splm8\" (UID: \"34cd8aa7-5566-4824-8c72-10438437ef94\") " pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.905651 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fjv5r"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.909861 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-dd95m"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.917059 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z58d\" (UniqueName: \"kubernetes.io/projected/f7e67da4-caf9-4204-94a3-22f4e562a827-kube-api-access-8z58d\") pod \"openshift-controller-manager-operator-756b6f6bc6-sdrv4\" (UID: \"f7e67da4-caf9-4204-94a3-22f4e562a827\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:27 crc kubenswrapper[4812]: W1125 16:49:27.936058 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bad98aa_94be_4024_8cb5_dc6078ffec1f.slice/crio-2e5b182a27686b156843e05e272fbad75f7befd37c685ae9883213468f05a11d WatchSource:0}: Error finding container 2e5b182a27686b156843e05e272fbad75f7befd37c685ae9883213468f05a11d: Status 404 returned error can't find the container with id 
2e5b182a27686b156843e05e272fbad75f7befd37c685ae9883213468f05a11d Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.947248 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9t6bj"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.947682 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5f0654c4-3371-4451-938f-803e6f1ffa69-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jz82q\" (UID: \"5f0654c4-3371-4451-938f-803e6f1ffa69\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:27 crc kubenswrapper[4812]: W1125 16:49:27.954117 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod135cae13_5b75_4d98_9c17_61448faddf90.slice/crio-f4f4a2f5741a2cf490d29d8f65e527d43b20a5ca40e2f44507b764afd761991e WatchSource:0}: Error finding container f4f4a2f5741a2cf490d29d8f65e527d43b20a5ca40e2f44507b764afd761991e: Status 404 returned error can't find the container with id f4f4a2f5741a2cf490d29d8f65e527d43b20a5ca40e2f44507b764afd761991e Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963661 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-certificates\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963692 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcvmz\" (UniqueName: \"kubernetes.io/projected/5cca6293-58b3-40fc-939d-f43aa66a57e7-kube-api-access-rcvmz\") pod \"migrator-59844c95c7-hw9bx\" (UID: \"5cca6293-58b3-40fc-939d-f43aa66a57e7\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963709 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3c23f6ed-bcde-4571-b631-c90ce20d9348-installation-pull-secrets\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963727 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-bound-sa-token\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963755 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3c23f6ed-bcde-4571-b631-c90ce20d9348-ca-trust-extracted\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963768 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhsns\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-kube-api-access-fhsns\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963843 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963924 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-tls\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.963980 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-trusted-ca\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:27 crc kubenswrapper[4812]: E1125 16:49:27.964565 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:28.464547705 +0000 UTC m=+143.304689800 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.983500 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.991910 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.992979 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gvnqg"] Nov 25 16:49:27 crc kubenswrapper[4812]: I1125 16:49:27.993936 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.005700 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.011576 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.018926 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.033796 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.037948 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.054991 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.071369 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.076406 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.076755 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1a50fff-1913-47bd-be58-05284eb4e40e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-nkjmk\" (UID: \"b1a50fff-1913-47bd-be58-05284eb4e40e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.076790 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ae73445-81df-49ec-9c77-da00d65eef40-serving-cert\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.076837 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a4485315-f2bc-47be-80a6-26508fa8719f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.076902 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-config\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.076961 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-signing-key\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077027 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4gfc\" (UniqueName: \"kubernetes.io/projected/a559cd93-6cab-452d-8ff4-abbc835343bb-kube-api-access-l4gfc\") pod \"ingress-canary-m57d7\" (UID: \"a559cd93-6cab-452d-8ff4-abbc835343bb\") " pod="openshift-ingress-canary/ingress-canary-m57d7" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077056 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d7bb301b-f2a3-4526-b953-d6aa12d8621c-config-volume\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077084 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/96e9ec11-59a4-42e6-b0e1-306160e54c23-apiservice-cert\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077142 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-client-ca\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077181 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-metrics-tls\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077210 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077252 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-registration-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077282 4812 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24xmz\" (UniqueName: \"kubernetes.io/projected/8eab44d8-62dd-4507-9ffc-cb571fe12289-kube-api-access-24xmz\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077336 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b56e5790-d6cb-4643-ab8a-10331bf20c3a-config\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077359 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-mountpoint-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077386 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d7bb301b-f2a3-4526-b953-d6aa12d8621c-secret-volume\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077564 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-config-volume\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077650 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10eaf74e-432a-4036-8d38-531569a99ac9-srv-cert\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077713 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/29ba287e-6a40-474a-88ed-c3cd0c9657b4-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-pt5mz\" (UID: \"29ba287e-6a40-474a-88ed-c3cd0c9657b4\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077780 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-tls\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077811 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-v6hst\" (UniqueName: \"kubernetes.io/projected/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-kube-api-access-v6hst\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077865 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a4485315-f2bc-47be-80a6-26508fa8719f-srv-cert\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077955 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84c147c9-ea21-4479-8f78-0f12faa15a2e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.077984 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10eaf74e-432a-4036-8d38-531569a99ac9-profile-collector-cert\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078012 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-trusted-ca\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078118 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n7nm\" (UniqueName: \"kubernetes.io/projected/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-kube-api-access-2n7nm\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078145 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/84c147c9-ea21-4479-8f78-0f12faa15a2e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078254 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-certificates\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078287 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcvmz\" (UniqueName: 
\"kubernetes.io/projected/5cca6293-58b3-40fc-939d-f43aa66a57e7-kube-api-access-rcvmz\") pod \"migrator-59844c95c7-hw9bx\" (UID: \"5cca6293-58b3-40fc-939d-f43aa66a57e7\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078318 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxmgg\" (UniqueName: \"kubernetes.io/projected/b56e5790-d6cb-4643-ab8a-10331bf20c3a-kube-api-access-dxmgg\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078349 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t27j6\" (UniqueName: \"kubernetes.io/projected/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-kube-api-access-t27j6\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078376 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-socket-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078402 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/96e9ec11-59a4-42e6-b0e1-306160e54c23-webhook-cert\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078465 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3c23f6ed-bcde-4571-b631-c90ce20d9348-installation-pull-secrets\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078498 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p47df\" (UniqueName: \"kubernetes.io/projected/b1a50fff-1913-47bd-be58-05284eb4e40e-kube-api-access-p47df\") pod \"package-server-manager-789f6589d5-nkjmk\" (UID: \"b1a50fff-1913-47bd-be58-05284eb4e40e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078591 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-bound-sa-token\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078691 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbdkf\" (UniqueName: 
\"kubernetes.io/projected/a4485315-f2bc-47be-80a6-26508fa8719f-kube-api-access-pbdkf\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078766 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bn2h\" (UniqueName: \"kubernetes.io/projected/10eaf74e-432a-4036-8d38-531569a99ac9-kube-api-access-4bn2h\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078797 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2jx4\" (UniqueName: \"kubernetes.io/projected/96e9ec11-59a4-42e6-b0e1-306160e54c23-kube-api-access-x2jx4\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078820 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjqx4\" (UniqueName: \"kubernetes.io/projected/d7bb301b-f2a3-4526-b953-d6aa12d8621c-kube-api-access-vjqx4\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078854 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8eab44d8-62dd-4507-9ffc-cb571fe12289-node-bootstrap-token\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078904 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3c23f6ed-bcde-4571-b631-c90ce20d9348-ca-trust-extracted\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078928 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhsns\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-kube-api-access-fhsns\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.078955 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079001 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-signing-cabundle\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079165 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a559cd93-6cab-452d-8ff4-abbc835343bb-cert\") pod \"ingress-canary-m57d7\" (UID: \"a559cd93-6cab-452d-8ff4-abbc835343bb\") " pod="openshift-ingress-canary/ingress-canary-m57d7" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079193 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b58lk\" (UniqueName: \"kubernetes.io/projected/6ae73445-81df-49ec-9c77-da00d65eef40-kube-api-access-b58lk\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079224 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4js7\" (UniqueName: \"kubernetes.io/projected/be7271bc-142b-4cff-aabd-8a69d6373849-kube-api-access-c4js7\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079319 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/96e9ec11-59a4-42e6-b0e1-306160e54c23-tmpfs\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079380 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b56e5790-d6cb-4643-ab8a-10331bf20c3a-serving-cert\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.079408 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqhpr\" (UniqueName: \"kubernetes.io/projected/29ba287e-6a40-474a-88ed-c3cd0c9657b4-kube-api-access-bqhpr\") pod \"control-plane-machine-set-operator-78cbb6b69f-pt5mz\" (UID: \"29ba287e-6a40-474a-88ed-c3cd0c9657b4\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.084257 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-plugins-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.084370 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-csi-data-dir\") pod 
\"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.084478 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84c147c9-ea21-4479-8f78-0f12faa15a2e-config\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.084600 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8eab44d8-62dd-4507-9ffc-cb571fe12289-certs\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.085451 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:28.585429962 +0000 UTC m=+143.425572057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.087040 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.098045 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.100147 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-certificates\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.103681 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-trusted-ca\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.106954 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-tls\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.107280 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3c23f6ed-bcde-4571-b631-c90ce20d9348-installation-pull-secrets\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.118069 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3c23f6ed-bcde-4571-b631-c90ce20d9348-ca-trust-extracted\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.124516 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.126858 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-bound-sa-token\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.140273 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.147374 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhsns\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-kube-api-access-fhsns\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.180692 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcvmz\" (UniqueName: 
\"kubernetes.io/projected/5cca6293-58b3-40fc-939d-f43aa66a57e7-kube-api-access-rcvmz\") pod \"migrator-59844c95c7-hw9bx\" (UID: \"5cca6293-58b3-40fc-939d-f43aa66a57e7\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186216 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1a50fff-1913-47bd-be58-05284eb4e40e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-nkjmk\" (UID: \"b1a50fff-1913-47bd-be58-05284eb4e40e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186258 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ae73445-81df-49ec-9c77-da00d65eef40-serving-cert\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186276 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a4485315-f2bc-47be-80a6-26508fa8719f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186294 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-config\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186311 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-signing-key\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186329 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4gfc\" (UniqueName: \"kubernetes.io/projected/a559cd93-6cab-452d-8ff4-abbc835343bb-kube-api-access-l4gfc\") pod \"ingress-canary-m57d7\" (UID: \"a559cd93-6cab-452d-8ff4-abbc835343bb\") " pod="openshift-ingress-canary/ingress-canary-m57d7" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186343 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d7bb301b-f2a3-4526-b953-d6aa12d8621c-config-volume\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186360 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/96e9ec11-59a4-42e6-b0e1-306160e54c23-apiservice-cert\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: 
\"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186377 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-metrics-tls\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186403 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186421 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-client-ca\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186436 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-registration-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186451 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24xmz\" (UniqueName: \"kubernetes.io/projected/8eab44d8-62dd-4507-9ffc-cb571fe12289-kube-api-access-24xmz\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186465 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-mountpoint-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186480 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d7bb301b-f2a3-4526-b953-d6aa12d8621c-secret-volume\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186495 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b56e5790-d6cb-4643-ab8a-10331bf20c3a-config\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.186513 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" 
(UniqueName: \"kubernetes.io/configmap/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-config-volume\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.187389 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d7bb301b-f2a3-4526-b953-d6aa12d8621c-config-volume\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189314 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10eaf74e-432a-4036-8d38-531569a99ac9-srv-cert\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189377 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/29ba287e-6a40-474a-88ed-c3cd0c9657b4-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-pt5mz\" (UID: \"29ba287e-6a40-474a-88ed-c3cd0c9657b4\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189417 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6hst\" (UniqueName: \"kubernetes.io/projected/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-kube-api-access-v6hst\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189449 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a4485315-f2bc-47be-80a6-26508fa8719f-srv-cert\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189483 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84c147c9-ea21-4479-8f78-0f12faa15a2e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189507 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10eaf74e-432a-4036-8d38-531569a99ac9-profile-collector-cert\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189553 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n7nm\" (UniqueName: \"kubernetes.io/projected/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-kube-api-access-2n7nm\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: 
\"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189573 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/84c147c9-ea21-4479-8f78-0f12faa15a2e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189604 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxmgg\" (UniqueName: \"kubernetes.io/projected/b56e5790-d6cb-4643-ab8a-10331bf20c3a-kube-api-access-dxmgg\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189620 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t27j6\" (UniqueName: \"kubernetes.io/projected/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-kube-api-access-t27j6\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189640 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-socket-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189661 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/96e9ec11-59a4-42e6-b0e1-306160e54c23-webhook-cert\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189680 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p47df\" (UniqueName: \"kubernetes.io/projected/b1a50fff-1913-47bd-be58-05284eb4e40e-kube-api-access-p47df\") pod \"package-server-manager-789f6589d5-nkjmk\" (UID: \"b1a50fff-1913-47bd-be58-05284eb4e40e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189719 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbdkf\" (UniqueName: \"kubernetes.io/projected/a4485315-f2bc-47be-80a6-26508fa8719f-kube-api-access-pbdkf\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189748 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2jx4\" (UniqueName: \"kubernetes.io/projected/96e9ec11-59a4-42e6-b0e1-306160e54c23-kube-api-access-x2jx4\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc 
kubenswrapper[4812]: I1125 16:49:28.189767 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bn2h\" (UniqueName: \"kubernetes.io/projected/10eaf74e-432a-4036-8d38-531569a99ac9-kube-api-access-4bn2h\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189787 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjqx4\" (UniqueName: \"kubernetes.io/projected/d7bb301b-f2a3-4526-b953-d6aa12d8621c-kube-api-access-vjqx4\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189804 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8eab44d8-62dd-4507-9ffc-cb571fe12289-node-bootstrap-token\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189827 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189847 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-signing-cabundle\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189890 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a559cd93-6cab-452d-8ff4-abbc835343bb-cert\") pod \"ingress-canary-m57d7\" (UID: \"a559cd93-6cab-452d-8ff4-abbc835343bb\") " pod="openshift-ingress-canary/ingress-canary-m57d7" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189910 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b58lk\" (UniqueName: \"kubernetes.io/projected/6ae73445-81df-49ec-9c77-da00d65eef40-kube-api-access-b58lk\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189927 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4js7\" (UniqueName: \"kubernetes.io/projected/be7271bc-142b-4cff-aabd-8a69d6373849-kube-api-access-c4js7\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189951 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/96e9ec11-59a4-42e6-b0e1-306160e54c23-tmpfs\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.189976 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b56e5790-d6cb-4643-ab8a-10331bf20c3a-serving-cert\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190000 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqhpr\" (UniqueName: \"kubernetes.io/projected/29ba287e-6a40-474a-88ed-c3cd0c9657b4-kube-api-access-bqhpr\") pod \"control-plane-machine-set-operator-78cbb6b69f-pt5mz\" (UID: \"29ba287e-6a40-474a-88ed-c3cd0c9657b4\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190029 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190063 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84c147c9-ea21-4479-8f78-0f12faa15a2e-config\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190080 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8eab44d8-62dd-4507-9ffc-cb571fe12289-certs\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190100 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-plugins-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190119 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-csi-data-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.190117 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-config\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.193017 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-metrics-tls\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.193307 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/96e9ec11-59a4-42e6-b0e1-306160e54c23-apiservice-cert\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.193333 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1a50fff-1913-47bd-be58-05284eb4e40e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-nkjmk\" (UID: \"b1a50fff-1913-47bd-be58-05284eb4e40e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.193612 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-plugins-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.194061 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-csi-data-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.196171 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.197805 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-signing-cabundle\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.197976 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:28.697955919 +0000 UTC m=+143.538098084 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.198446 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-config-volume\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.199735 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84c147c9-ea21-4479-8f78-0f12faa15a2e-config\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.199972 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-socket-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.200309 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/96e9ec11-59a4-42e6-b0e1-306160e54c23-tmpfs\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.200633 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ae73445-81df-49ec-9c77-da00d65eef40-serving-cert\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.200913 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-registration-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.201521 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84c147c9-ea21-4479-8f78-0f12faa15a2e-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.201924 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-client-ca\") pod 
\"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.202094 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-mountpoint-dir\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.202641 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b56e5790-d6cb-4643-ab8a-10331bf20c3a-config\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.207660 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b56e5790-d6cb-4643-ab8a-10331bf20c3a-serving-cert\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.208781 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/8eab44d8-62dd-4507-9ffc-cb571fe12289-node-bootstrap-token\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.209225 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a4485315-f2bc-47be-80a6-26508fa8719f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.209250 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a559cd93-6cab-452d-8ff4-abbc835343bb-cert\") pod \"ingress-canary-m57d7\" (UID: \"a559cd93-6cab-452d-8ff4-abbc835343bb\") " pod="openshift-ingress-canary/ingress-canary-m57d7" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.209680 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-signing-key\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.209879 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/29ba287e-6a40-474a-88ed-c3cd0c9657b4-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-pt5mz\" (UID: \"29ba287e-6a40-474a-88ed-c3cd0c9657b4\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.210238 4812 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/10eaf74e-432a-4036-8d38-531569a99ac9-srv-cert\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.208560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a4485315-f2bc-47be-80a6-26508fa8719f-srv-cert\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.212374 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/8eab44d8-62dd-4507-9ffc-cb571fe12289-certs\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.212720 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/96e9ec11-59a4-42e6-b0e1-306160e54c23-webhook-cert\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.212823 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d7bb301b-f2a3-4526-b953-d6aa12d8621c-secret-volume\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.213432 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.213474 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/10eaf74e-432a-4036-8d38-531569a99ac9-profile-collector-cert\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:28 crc kubenswrapper[4812]: W1125 16:49:28.217414 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c9ec92e_e01e_4a37_891a_ddc2293f0ced.slice/crio-175b6426970ca70973d323da90ab3ece5b90795cdcd63820a73d566c6586191a WatchSource:0}: Error finding container 175b6426970ca70973d323da90ab3ece5b90795cdcd63820a73d566c6586191a: Status 404 returned error can't find the container with id 175b6426970ca70973d323da90ab3ece5b90795cdcd63820a73d566c6586191a Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.226464 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4js7\" (UniqueName: \"kubernetes.io/projected/be7271bc-142b-4cff-aabd-8a69d6373849-kube-api-access-c4js7\") pod 
\"marketplace-operator-79b997595-v7lp8\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.241303 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjqx4\" (UniqueName: \"kubernetes.io/projected/d7bb301b-f2a3-4526-b953-d6aa12d8621c-kube-api-access-vjqx4\") pod \"collect-profiles-29401485-brhb9\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.271229 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p47df\" (UniqueName: \"kubernetes.io/projected/b1a50fff-1913-47bd-be58-05284eb4e40e-kube-api-access-p47df\") pod \"package-server-manager-789f6589d5-nkjmk\" (UID: \"b1a50fff-1913-47bd-be58-05284eb4e40e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.282239 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbdkf\" (UniqueName: \"kubernetes.io/projected/a4485315-f2bc-47be-80a6-26508fa8719f-kube-api-access-pbdkf\") pod \"olm-operator-6b444d44fb-8p5fc\" (UID: \"a4485315-f2bc-47be-80a6-26508fa8719f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.292404 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.293453 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:28.793428814 +0000 UTC m=+143.633570909 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.293553 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.294345 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:28.794329091 +0000 UTC m=+143.634471246 (durationBeforeRetry 500ms). 
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.298683 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2jx4\" (UniqueName: \"kubernetes.io/projected/96e9ec11-59a4-42e6-b0e1-306160e54c23-kube-api-access-x2jx4\") pod \"packageserver-d55dfcdfc-kxr75\" (UID: \"96e9ec11-59a4-42e6-b0e1-306160e54c23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.298917 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.319890 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bn2h\" (UniqueName: \"kubernetes.io/projected/10eaf74e-432a-4036-8d38-531569a99ac9-kube-api-access-4bn2h\") pod \"catalog-operator-68c6474976-sqpll\" (UID: \"10eaf74e-432a-4036-8d38-531569a99ac9\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.341092 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4gfc\" (UniqueName: \"kubernetes.io/projected/a559cd93-6cab-452d-8ff4-abbc835343bb-kube-api-access-l4gfc\") pod \"ingress-canary-m57d7\" (UID: \"a559cd93-6cab-452d-8ff4-abbc835343bb\") " pod="openshift-ingress-canary/ingress-canary-m57d7"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.355651 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6hst\" (UniqueName: \"kubernetes.io/projected/b297b670-7982-4cd1-8d8e-b1a3cda0dce1-kube-api-access-v6hst\") pod \"dns-default-xv59p\" (UID: \"b297b670-7982-4cd1-8d8e-b1a3cda0dce1\") " pod="openshift-dns/dns-default-xv59p"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.396885 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.397607 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:28.897515347 +0000 UTC m=+143.737657442 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.403599 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.404365 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n7nm\" (UniqueName: \"kubernetes.io/projected/e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a-kube-api-access-2n7nm\") pod \"service-ca-9c57cc56f-x9d4d\" (UID: \"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a\") " pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.415800 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.419167 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.431851 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxmgg\" (UniqueName: \"kubernetes.io/projected/b56e5790-d6cb-4643-ab8a-10331bf20c3a-kube-api-access-dxmgg\") pod \"service-ca-operator-777779d784-brj9b\" (UID: \"b56e5790-d6cb-4643-ab8a-10331bf20c3a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.432458 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b"]
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.432814 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.446797 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t27j6\" (UniqueName: \"kubernetes.io/projected/3da5ea57-7db8-4799-b1eb-b18e1e9185ef-kube-api-access-t27j6\") pod \"csi-hostpathplugin-t7x6w\" (UID: \"3da5ea57-7db8-4799-b1eb-b18e1e9185ef\") " pod="hostpath-provisioner/csi-hostpathplugin-t7x6w"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.468114 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.471858 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/84c147c9-ea21-4479-8f78-0f12faa15a2e-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7cvbf\" (UID: \"84c147c9-ea21-4479-8f78-0f12faa15a2e\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.475571 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.485948 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.488334 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b58lk\" (UniqueName: \"kubernetes.io/projected/6ae73445-81df-49ec-9c77-da00d65eef40-kube-api-access-b58lk\") pod \"route-controller-manager-6576b87f9c-dgwd6\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.494117 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.504378 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-m57d7"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.505242 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.505572 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.005560404 +0000 UTC m=+143.845702499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.510460 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dd95m" event={"ID":"135cae13-5b75-4d98-9c17-61448faddf90","Type":"ContainerStarted","Data":"a688835dca1f760b8cd52bda38b6b882c7a2a2e4a42bc70e13e1e854a8a9f266"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.510846 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dd95m" event={"ID":"135cae13-5b75-4d98-9c17-61448faddf90","Type":"ContainerStarted","Data":"f4f4a2f5741a2cf490d29d8f65e527d43b20a5ca40e2f44507b764afd761991e"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.511595 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-dd95m"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.514899 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.514943 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.524978 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqhpr\" (UniqueName: \"kubernetes.io/projected/29ba287e-6a40-474a-88ed-c3cd0c9657b4-kube-api-access-bqhpr\") pod \"control-plane-machine-set-operator-78cbb6b69f-pt5mz\" (UID: \"29ba287e-6a40-474a-88ed-c3cd0c9657b4\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.531324 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.540739 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" event={"ID":"3c9ec92e-e01e-4a37-891a-ddc2293f0ced","Type":"ContainerStarted","Data":"175b6426970ca70973d323da90ab3ece5b90795cdcd63820a73d566c6586191a"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.541750 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24xmz\" (UniqueName: \"kubernetes.io/projected/8eab44d8-62dd-4507-9ffc-cb571fe12289-kube-api-access-24xmz\") pod \"machine-config-server-bwjqq\" (UID: \"8eab44d8-62dd-4507-9ffc-cb571fe12289\") " pod="openshift-machine-config-operator/machine-config-server-bwjqq"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.543044 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-xv59p"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.560120 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-bwjqq"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.570657 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" event={"ID":"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8","Type":"ContainerStarted","Data":"af512e233874c8f297827d85ab5038684991f00e8fb892ab79805875be0bd5e4"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.570704 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" event={"ID":"c4b1ffb1-0cb4-4429-9eb4-e3f994b2b6c8","Type":"ContainerStarted","Data":"f9f196dc938ea8c83b49038d95fe8cc0acf5adc95984322fce7ad8ad8a707c4d"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.586876 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" event={"ID":"3cc2b341-1b08-45ca-970f-b64350fbe88e","Type":"ContainerStarted","Data":"b316bba79dc03105d7ab3993e6a36b7b14fb97d8d9720290702fb0e1f59cd82a"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.586918 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" event={"ID":"3cc2b341-1b08-45ca-970f-b64350fbe88e","Type":"ContainerStarted","Data":"c0b21b59171e2d815de7a5b41cc91da825601372cb558f46df8dcac4851d5adb"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.593985 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" event={"ID":"bf199acf-8845-4841-b653-d3f4b704f224","Type":"ContainerStarted","Data":"666e31f4e1d2c23ec251bc24f8bccdacc0a4481e44c67659c22fa5508cce09b7"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.596313 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" event={"ID":"46c765da-7def-4c6e-8ac2-8da853bbb378","Type":"ContainerStarted","Data":"74e20e0833ebac1968b2de94998c465acd5eebe0395cdbddb423effca3dece46"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.598298 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-pxgkd" event={"ID":"64f34f9a-3fdc-492d-a75f-93e4a3a8727f","Type":"ContainerStarted","Data":"657d089d9c0c526e6ad369a4fc0b0449e2939a89e22283cddc93334b468c032f"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.598324 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-pxgkd" event={"ID":"64f34f9a-3fdc-492d-a75f-93e4a3a8727f","Type":"ContainerStarted","Data":"627bd39bb0398dd9808a42e1896e355b97d9fa9bd6762b1ed987c8786978ea55"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.605258 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" event={"ID":"01a09641-0222-4bd8-af33-bf92edcc229c","Type":"ContainerStarted","Data":"b1adbbd29ac387a54f69c974adc7eadced01165a776c0aa95551813543eb84b2"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.607060 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.608595 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.108572994 +0000 UTC m=+143.948715089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.609175 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.612832 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.612889 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.619001 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" event={"ID":"b887db51-86fd-44fb-b146-21b546ae5345","Type":"ContainerStarted","Data":"9a613231cfedfcdf4d2835aa133ca93154aa29a334c36a150cbf7e6fa98a3719"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.632864 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nj6w8" event={"ID":"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf","Type":"ContainerStarted","Data":"5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.632910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nj6w8" event={"ID":"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf","Type":"ContainerStarted","Data":"a7c90059d5030c08a459a2626af94154eb40bdafd38b5ca0a74d3a5149585a7b"}
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.639554 4812 generic.go:334] "Generic (PLEG): container finished" podID="464e93da-ad92-4165-b1c2-6cede11ac006" containerID="f3149f486351fe176dead6824719f79ef467187423a1576791a5da116b0b1244" exitCode=0
Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.639639 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" event={"ID":"464e93da-ad92-4165-b1c2-6cede11ac006","Type":"ContainerDied","Data":"f3149f486351fe176dead6824719f79ef467187423a1576791a5da116b0b1244"}
Nov 25 16:49:28 crc kubenswrapper[4812]:
I1125 16:49:28.639696 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" event={"ID":"464e93da-ad92-4165-b1c2-6cede11ac006","Type":"ContainerStarted","Data":"8f8fc11ac54919caf16b5bbb55345be54bb9b893847016fa41a807eb686f5f8c"} Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.677287 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" event={"ID":"6a4b74ae-6e26-4983-b8aa-08f4a9935aca","Type":"ContainerStarted","Data":"65968fb0bb007bb8389b5498ffae688a68b338e7d199ae95a68101b08e5fb1c5"} Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.686677 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" event={"ID":"8bad98aa-94be-4024-8cb5-dc6078ffec1f","Type":"ContainerStarted","Data":"2e5b182a27686b156843e05e272fbad75f7befd37c685ae9883213468f05a11d"} Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.686750 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.695161 4812 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-fjv5r container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" start-of-body= Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.695229 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" podUID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.709001 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.713129 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.213109681 +0000 UTC m=+144.053251946 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.713735 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.713832 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" event={"ID":"4662d425-aec1-4e58-845b-36ae7574da7a","Type":"ContainerStarted","Data":"181d3a3e6c8405fc117c4fd094baf7711c0026f5e8ca20c5fbc5a920e54119c7"} Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.713875 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" event={"ID":"4662d425-aec1-4e58-845b-36ae7574da7a","Type":"ContainerStarted","Data":"e1b2201c487b400ed9022ac9d4f5ff356a3cee2a281f2a805702f38e36afeaf0"} Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.731753 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wmngc"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.737968 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.751293 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.755410 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.761577 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4chwd"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.813047 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.813702 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.313670775 +0000 UTC m=+144.153812870 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.815109 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.829160 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.329128827 +0000 UTC m=+144.169270922 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.856270 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-fc6mw"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.865964 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.874826 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-splm8"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.920896 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.921261 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.421238197 +0000 UTC m=+144.261380292 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.921370 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:28 crc kubenswrapper[4812]: E1125 16:49:28.921756 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.421748984 +0000 UTC m=+144.261891079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.926086 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.947447 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx"] Nov 25 16:49:28 crc kubenswrapper[4812]: I1125 16:49:28.952197 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.003020 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.023085 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.023570 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.523521565 +0000 UTC m=+144.363663660 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: W1125 16:49:29.032613 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02792ddc_034c_4fa3_8e9b_bde721cb94e5.slice/crio-87dad16b82e05b30db248d3a860719ae426f7dbb64ae68b4836a9e74ded98676 WatchSource:0}: Error finding container 87dad16b82e05b30db248d3a860719ae426f7dbb64ae68b4836a9e74ded98676: Status 404 returned error can't find the container with id 87dad16b82e05b30db248d3a860719ae426f7dbb64ae68b4836a9e74ded98676 Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.111728 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.116487 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.125428 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.125750 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.62573567 +0000 UTC m=+144.465877765 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: W1125 16:49:29.146127 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8eab44d8_62dd_4507_9ffc_cb571fe12289.slice/crio-880decacb38d1b1f22a1f05cabfbf5b219a54727f7b6db8243a84a4c684e3e70 WatchSource:0}: Error finding container 880decacb38d1b1f22a1f05cabfbf5b219a54727f7b6db8243a84a4c684e3e70: Status 404 returned error can't find the container with id 880decacb38d1b1f22a1f05cabfbf5b219a54727f7b6db8243a84a4c684e3e70 Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.184054 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-nj6w8" podStartSLOduration=122.184036227 podStartE2EDuration="2m2.184036227s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:29.147014083 +0000 UTC m=+143.987156198" watchObservedRunningTime="2025-11-25 16:49:29.184036227 +0000 UTC m=+144.024178322" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.186698 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-brj9b"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.228234 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.228502 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.728486992 +0000 UTC m=+144.568629087 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.229029 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-dd95m" podStartSLOduration=122.229018019 podStartE2EDuration="2m2.229018019s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:29.228675268 +0000 UTC m=+144.068817363" watchObservedRunningTime="2025-11-25 16:49:29.229018019 +0000 UTC m=+144.069160124" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.299599 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v7lp8"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.332610 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.333034 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.833008259 +0000 UTC m=+144.673150354 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.435927 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.436846 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:29.936818904 +0000 UTC m=+144.776960999 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.465096 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-pxgkd" podStartSLOduration=121.465073205 podStartE2EDuration="2m1.465073205s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:29.451205253 +0000 UTC m=+144.291347378" watchObservedRunningTime="2025-11-25 16:49:29.465073205 +0000 UTC m=+144.305215300" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.504225 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75"] Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.537975 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.538303 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.038286136 +0000 UTC m=+144.878428231 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.643516 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:29 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:29 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:29 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.643594 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.644401 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.645096 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.145076334 +0000 UTC m=+144.985218429 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.650671 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" podStartSLOduration=122.650651188 podStartE2EDuration="2m2.650651188s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:29.620058264 +0000 UTC m=+144.460200359" watchObservedRunningTime="2025-11-25 16:49:29.650651188 +0000 UTC m=+144.490793283" Nov 25 16:49:29 crc kubenswrapper[4812]: W1125 16:49:29.708631 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96e9ec11_59a4_42e6_b0e1_306160e54c23.slice/crio-e3bfa7a8258f73f6f811a051a94a5e8226f15abf15ac4cacd2daaae498cf1cf0 WatchSource:0}: Error finding container e3bfa7a8258f73f6f811a051a94a5e8226f15abf15ac4cacd2daaae498cf1cf0: Status 404 returned error can't find the container with id e3bfa7a8258f73f6f811a051a94a5e8226f15abf15ac4cacd2daaae498cf1cf0 Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.746749 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.747199 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.247183776 +0000 UTC m=+145.087325881 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.759464 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" event={"ID":"f7e67da4-caf9-4204-94a3-22f4e562a827","Type":"ContainerStarted","Data":"e92fa6ae27c9c969441f2f2e67ced8a565aee68419d94786305e91787d07f3c6"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.760864 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" event={"ID":"01a09641-0222-4bd8-af33-bf92edcc229c","Type":"ContainerStarted","Data":"061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.761952 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.766629 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" event={"ID":"464e93da-ad92-4165-b1c2-6cede11ac006","Type":"ContainerStarted","Data":"493f82a5c4b9f7e0300db13142f3b3199298ba53ae8283d470f4950b7df77518"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.767630 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.769168 4812 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-9t6bj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.769223 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" podUID="01a09641-0222-4bd8-af33-bf92edcc229c" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.772761 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" event={"ID":"10eaf74e-432a-4036-8d38-531569a99ac9","Type":"ContainerStarted","Data":"5ba0bf76c048d1dafedcec224025de0d67fec12d6f666d5fad7b2ce2c0e72944"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.775324 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" event={"ID":"b56e5790-d6cb-4643-ab8a-10331bf20c3a","Type":"ContainerStarted","Data":"b0ca4abd1e8cda09a4cb24079b427a05e261e68e9c9f8baa3a346d3c52cf42cf"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.778120 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" 
event={"ID":"f030671f-5121-4cb4-8163-5c65444c1896","Type":"ContainerStarted","Data":"82f02fcfff4ce01fd9060595e29c680dc6a61c5b97977c51f597a67023546840"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.848342 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.849730 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.34971112 +0000 UTC m=+145.189853215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.950390 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" event={"ID":"6a4b74ae-6e26-4983-b8aa-08f4a9935aca","Type":"ContainerStarted","Data":"542530cd7f763f007aef1e18d778d2304fb4d214bb648dfa086dfeb45b81bb7f"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.956623 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" event={"ID":"f93960d1-f97a-4d2d-9e04-edb082755a9a","Type":"ContainerStarted","Data":"4821d91a3f842e194a40b864a9e87a56135c3063caa278eca2ca07e2df9b7a20"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.957354 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" event={"ID":"5f0654c4-3371-4451-938f-803e6f1ffa69","Type":"ContainerStarted","Data":"9ec28a228b447eb71165e8f0405b6d28c186de94ced3a1392f835708aab40d26"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.950475 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:29 crc kubenswrapper[4812]: E1125 16:49:29.950804 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.45078491 +0000 UTC m=+145.290927005 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.972588 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" event={"ID":"3cc2b341-1b08-45ca-970f-b64350fbe88e","Type":"ContainerStarted","Data":"21b7fefacb438c27547e6a93d779cbb2267d6113456034ea19cd502454bcce76"} Nov 25 16:49:29 crc kubenswrapper[4812]: I1125 16:49:29.987499 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" event={"ID":"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f","Type":"ContainerStarted","Data":"fa48f3e41d2ee28b545c9db70339e8cb222f2e64ace4b83f47932587443224e5"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.004451 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" event={"ID":"aeecd090-6524-4d8e-a0eb-e785b78f99c2","Type":"ContainerStarted","Data":"cd1af5b7e9300ceba04f4eed3dd158d051387490f4efb539bf72b1a6a4ef5688"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.012962 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-hshsk" podStartSLOduration=125.012936727 podStartE2EDuration="2m5.012936727s" podCreationTimestamp="2025-11-25 16:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:29.973252301 +0000 UTC m=+144.813394396" watchObservedRunningTime="2025-11-25 16:49:30.012936727 +0000 UTC m=+144.853078842" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.038463 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" event={"ID":"b887db51-86fd-44fb-b146-21b546ae5345","Type":"ContainerStarted","Data":"60651be20295f46b6cb1854a65fc76b62a4e805964c73b27d7b6533e006a0b66"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.060222 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.060651 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.560377146 +0000 UTC m=+145.400519241 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.061043 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.061195 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" event={"ID":"5cca6293-58b3-40fc-939d-f43aa66a57e7","Type":"ContainerStarted","Data":"5eab1df5375e13060249b2812c27ee69d380b7b6ef53a333b74a8b0369dd3c71"} Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.063233 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.562026187 +0000 UTC m=+145.402168272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.068710 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.069037 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" event={"ID":"e9cf419e-cde9-4a86-a206-24ac78d1e475","Type":"ContainerStarted","Data":"d6c9e1f1ae0d8ac62b002da03f854e8b40c363b3aabe84346e95445734d22ed8"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.103904 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.105561 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-splm8" event={"ID":"34cd8aa7-5566-4824-8c72-10438437ef94","Type":"ContainerStarted","Data":"1490bb9269b4d77da107092b42cf42a2607afaff05e328e26eebfd3bf668dfa6"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.130592 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" event={"ID":"3c9ec92e-e01e-4a37-891a-ddc2293f0ced","Type":"ContainerStarted","Data":"65bebe671df6bbdfd3b9daec38468e2ff83e7bb5f20c0c7c835309d0341d16a1"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 
16:49:30.152874 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" event={"ID":"bf199acf-8845-4841-b653-d3f4b704f224","Type":"ContainerStarted","Data":"00bf7f06ccdc2b477b812fa5242e89618745442f568c43cb7913ef2439f89728"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.152937 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" event={"ID":"bf199acf-8845-4841-b653-d3f4b704f224","Type":"ContainerStarted","Data":"522fcd9e23d3588f2a3223d0738b03eb9666e8bdcc9c4ae8a413c1fe23bae35a"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.162809 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.163599 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.663495149 +0000 UTC m=+145.503637244 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.172109 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" event={"ID":"3b01e1d1-14fa-4594-b52e-07b377965f5e","Type":"ContainerStarted","Data":"b586d15586a224ce30f4bfb4a3eea8231365654a995d9a2a108c3cff1b0ff25e"} Nov 25 16:49:30 crc kubenswrapper[4812]: W1125 16:49:30.187499 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1a50fff_1913_47bd_be58_05284eb4e40e.slice/crio-d7b4c1a77756bbee86f0906588cc3e2aa25031db5ea4a33f37be509c87ff5e7d WatchSource:0}: Error finding container d7b4c1a77756bbee86f0906588cc3e2aa25031db5ea4a33f37be509c87ff5e7d: Status 404 returned error can't find the container with id d7b4c1a77756bbee86f0906588cc3e2aa25031db5ea4a33f37be509c87ff5e7d Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.203321 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.209930 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-x9d4d"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.211075 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" event={"ID":"a1348b83-535e-4211-891a-d234f9e9c4ec","Type":"ContainerStarted","Data":"4bc41d133ad65234d74264015eb30a3ef4ba5622a5075956c15fbb167a764d67"} Nov 25 16:49:30 crc 
kubenswrapper[4812]: I1125 16:49:30.211166 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" event={"ID":"a1348b83-535e-4211-891a-d234f9e9c4ec","Type":"ContainerStarted","Data":"b408823ff58d8b7fca86f1211c734eee643db1d95b174d586cf6b317fe177ab4"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.214284 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-m57d7"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.222334 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" event={"ID":"4662d425-aec1-4e58-845b-36ae7574da7a","Type":"ContainerStarted","Data":"5e687dec4f3745bc18a46fd79056fd7edd4e94885b54bdf253bf1716c7e430ff"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.227202 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" event={"ID":"02792ddc-034c-4fa3-8e9b-bde721cb94e5","Type":"ContainerStarted","Data":"87dad16b82e05b30db248d3a860719ae426f7dbb64ae68b4836a9e74ded98676"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.235773 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" podStartSLOduration=123.23574111 podStartE2EDuration="2m3.23574111s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.231180898 +0000 UTC m=+145.071322993" watchObservedRunningTime="2025-11-25 16:49:30.23574111 +0000 UTC m=+145.075883215" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.251628 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-t7x6w"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.259450 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-bwjqq" event={"ID":"8eab44d8-62dd-4507-9ffc-cb571fe12289","Type":"ContainerStarted","Data":"880decacb38d1b1f22a1f05cabfbf5b219a54727f7b6db8243a84a4c684e3e70"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.265401 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.266780 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.766767917 +0000 UTC m=+145.606910012 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: W1125 16:49:30.274161 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3cdc2a6_f0b4_4b82_941d_3984c4ba8d2a.slice/crio-4401fdd1da618ce08a4ed224e3553b4dece889c91df8cb0895ceacc817875dda WatchSource:0}: Error finding container 4401fdd1da618ce08a4ed224e3553b4dece889c91df8cb0895ceacc817875dda: Status 404 returned error can't find the container with id 4401fdd1da618ce08a4ed224e3553b4dece889c91df8cb0895ceacc817875dda Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.294130 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-gvnqg" podStartSLOduration=123.29411206 podStartE2EDuration="2m3.29411206s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.293860471 +0000 UTC m=+145.134002586" watchObservedRunningTime="2025-11-25 16:49:30.29411206 +0000 UTC m=+145.134254155" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.299435 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" event={"ID":"8bad98aa-94be-4024-8cb5-dc6078ffec1f","Type":"ContainerStarted","Data":"cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.300106 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.314464 4812 generic.go:334] "Generic (PLEG): container finished" podID="46c765da-7def-4c6e-8ac2-8da853bbb378" containerID="8ee9fb30f4a2bcb0a9fe7f05288442b0d3b3b008dd835113cf69e6f61e67d9e8" exitCode=0 Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.314597 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" event={"ID":"46c765da-7def-4c6e-8ac2-8da853bbb378","Type":"ContainerDied","Data":"8ee9fb30f4a2bcb0a9fe7f05288442b0d3b3b008dd835113cf69e6f61e67d9e8"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.331315 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" event={"ID":"be7271bc-142b-4cff-aabd-8a69d6373849","Type":"ContainerStarted","Data":"77e164fbc764d8f526d52d0f2af64514bc7e9b945354f3afef6d275721f18726"} Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.331403 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.331449 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" 
podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.333452 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-vntd5" podStartSLOduration=123.333436695 podStartE2EDuration="2m3.333436695s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.332219827 +0000 UTC m=+145.172361922" watchObservedRunningTime="2025-11-25 16:49:30.333436695 +0000 UTC m=+145.173578790" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.359594 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.367162 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.368607 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.86858473 +0000 UTC m=+145.708726865 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.371830 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-xv59p"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.378135 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz"] Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.395974 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-2f9nk" podStartSLOduration=123.395952602 podStartE2EDuration="2m3.395952602s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.36024496 +0000 UTC m=+145.200387065" watchObservedRunningTime="2025-11-25 16:49:30.395952602 +0000 UTC m=+145.236094697" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.426842 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-kfwch" podStartSLOduration=122.426819974 podStartE2EDuration="2m2.426819974s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.390255975 +0000 UTC m=+145.230398080" watchObservedRunningTime="2025-11-25 16:49:30.426819974 +0000 UTC m=+145.266962089" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.433974 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nbb7l" podStartSLOduration=123.433947467 podStartE2EDuration="2m3.433947467s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.418832686 +0000 UTC m=+145.258974781" watchObservedRunningTime="2025-11-25 16:49:30.433947467 +0000 UTC m=+145.274089562" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.462081 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" podStartSLOduration=123.462055533 podStartE2EDuration="2m3.462055533s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.451596287 +0000 UTC m=+145.291738382" watchObservedRunningTime="2025-11-25 16:49:30.462055533 +0000 UTC m=+145.302197628" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.468891 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" 
(UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.470461 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:30.970442374 +0000 UTC m=+145.810584469 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: W1125 16:49:30.475237 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4485315_f2bc_47be_80a6_26508fa8719f.slice/crio-b918ae72c79b6ae66860e140fdeb0c2a2d400527b6cce1a3c34fa2d19bf80989 WatchSource:0}: Error finding container b918ae72c79b6ae66860e140fdeb0c2a2d400527b6cce1a3c34fa2d19bf80989: Status 404 returned error can't find the container with id b918ae72c79b6ae66860e140fdeb0c2a2d400527b6cce1a3c34fa2d19bf80989 Nov 25 16:49:30 crc kubenswrapper[4812]: W1125 16:49:30.510592 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb297b670_7982_4cd1_8d8e_b1a3cda0dce1.slice/crio-e2519abc782afd7647b0f60f8ea10878701df068a4d3053c52b9b77f26a6e511 WatchSource:0}: Error finding container e2519abc782afd7647b0f60f8ea10878701df068a4d3053c52b9b77f26a6e511: Status 404 returned error can't find the container with id e2519abc782afd7647b0f60f8ea10878701df068a4d3053c52b9b77f26a6e511 Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.518898 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-td2bc" podStartSLOduration=122.518876523 podStartE2EDuration="2m2.518876523s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.514883539 +0000 UTC m=+145.355025634" watchObservedRunningTime="2025-11-25 16:49:30.518876523 +0000 UTC m=+145.359018618" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.570043 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.570294 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.070244684 +0000 UTC m=+145.910386779 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.570706 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.571052 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.071042668 +0000 UTC m=+145.911184763 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.590409 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dfp6b" podStartSLOduration=122.590390061 podStartE2EDuration="2m2.590390061s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:30.542617003 +0000 UTC m=+145.382759128" watchObservedRunningTime="2025-11-25 16:49:30.590390061 +0000 UTC m=+145.430532156" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.623887 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:30 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:30 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:30 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.624306 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.671830 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.672228 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.172209642 +0000 UTC m=+146.012351737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.774409 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.774943 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.274929672 +0000 UTC m=+146.115071767 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.860845 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.876585 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.876749 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.376724215 +0000 UTC m=+146.216866310 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.877026 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.877377 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.377362364 +0000 UTC m=+146.217504459 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:30 crc kubenswrapper[4812]: I1125 16:49:30.991896 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:30 crc kubenswrapper[4812]: E1125 16:49:30.992603 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.492584036 +0000 UTC m=+146.332726131 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.095453 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.095889 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.595871673 +0000 UTC m=+146.436013788 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.196143 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.197274 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.697257693 +0000 UTC m=+146.537399778 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.305976 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.306382 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.806368153 +0000 UTC m=+146.646510248 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.410936 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.411423 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:31.911404437 +0000 UTC m=+146.751546542 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.474162 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" event={"ID":"be7271bc-142b-4cff-aabd-8a69d6373849","Type":"ContainerStarted","Data":"e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.475944 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.487673 4812 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-v7lp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/healthz\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.487918 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.20:8080/healthz\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.491103 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" event={"ID":"b56e5790-d6cb-4643-ab8a-10331bf20c3a","Type":"ContainerStarted","Data":"cd8964b9b57e059fdf8d73daf0370136d03adc228fd5ac5d59d41420b1a783b4"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.505052 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" event={"ID":"29ba287e-6a40-474a-88ed-c3cd0c9657b4","Type":"ContainerStarted","Data":"75ba57b485b7da4ce43db89f4c03eac57a2de1da63884f2c23a7a6f822b1d42b"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.513424 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.514274 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.014242421 +0000 UTC m=+146.854384516 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.526672 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" event={"ID":"b1a50fff-1913-47bd-be58-05284eb4e40e","Type":"ContainerStarted","Data":"5d08fddb749e6fac60a8c4c16a5bca4dd487cbd87f8bc2938f58da0c4bdc8f21"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.526713 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" event={"ID":"b1a50fff-1913-47bd-be58-05284eb4e40e","Type":"ContainerStarted","Data":"d7b4c1a77756bbee86f0906588cc3e2aa25031db5ea4a33f37be509c87ff5e7d"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.531583 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" event={"ID":"5f0654c4-3371-4451-938f-803e6f1ffa69","Type":"ContainerStarted","Data":"28089ae75c03f742b9de3fad92d42dd72ac62fce273b7f6c70ee7c26aa5e4e69"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.531879 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" podStartSLOduration=123.531867471 podStartE2EDuration="2m3.531867471s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.529458605 +0000 UTC m=+146.369600700" watchObservedRunningTime="2025-11-25 16:49:31.531867471 +0000 UTC m=+146.372009566" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.550187 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" event={"ID":"02792ddc-034c-4fa3-8e9b-bde721cb94e5","Type":"ContainerStarted","Data":"b371a33d51333625e618c776cc9c890230615d56ae580808c6bcb059a46133a8"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.551219 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" event={"ID":"84c147c9-ea21-4479-8f78-0f12faa15a2e","Type":"ContainerStarted","Data":"4d63142cf3893017838f3808298a9edfc9b3aa0b6993fefea7efcab1d8594e61"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.556240 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" event={"ID":"f7e67da4-caf9-4204-94a3-22f4e562a827","Type":"ContainerStarted","Data":"92aac8afbbccefbb94d1ee3e98d24be34170d3f0895fd111c422e1646363c643"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.560562 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" event={"ID":"f030671f-5121-4cb4-8163-5c65444c1896","Type":"ContainerStarted","Data":"25b61ff5dbffaedded30609d6318d514c2a88ceb4eaba3711e6fdd3d091fa47f"} Nov 25 
16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.561735 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-brj9b" podStartSLOduration=123.56172312 podStartE2EDuration="2m3.56172312s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.559118279 +0000 UTC m=+146.399260384" watchObservedRunningTime="2025-11-25 16:49:31.56172312 +0000 UTC m=+146.401865215" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.570708 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" event={"ID":"46c765da-7def-4c6e-8ac2-8da853bbb378","Type":"ContainerStarted","Data":"b63a1615c544126abe63e1495a266d0e81a6fe3de9c2bb296a3ba49f13b9b1ec"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.611912 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" event={"ID":"5cca6293-58b3-40fc-939d-f43aa66a57e7","Type":"ContainerStarted","Data":"e4ed9afc92eaeb0f52682d2454b307bad72d50d35134d1312fe98df9f19cc442"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.613197 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:31 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:31 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:31 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.613248 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.615238 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.616356 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.116341083 +0000 UTC m=+146.956483168 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.619181 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" event={"ID":"aeecd090-6524-4d8e-a0eb-e785b78f99c2","Type":"ContainerStarted","Data":"e6f83ef318f9da62db94b87c47e75ca3ab0bba2a2b14baadab4c63229251fcec"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.619820 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jz82q" podStartSLOduration=123.61980524 podStartE2EDuration="2m3.61980524s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.585641956 +0000 UTC m=+146.425784051" watchObservedRunningTime="2025-11-25 16:49:31.61980524 +0000 UTC m=+146.459947335" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.620752 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-sdrv4" podStartSLOduration=124.62074493 podStartE2EDuration="2m4.62074493s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.617361395 +0000 UTC m=+146.457503510" watchObservedRunningTime="2025-11-25 16:49:31.62074493 +0000 UTC m=+146.460887025" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.642814 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-splm8" event={"ID":"34cd8aa7-5566-4824-8c72-10438437ef94","Type":"ContainerStarted","Data":"0d487033de138bda759acbcad8505e8c5582cd08664275ec215db7455344ccd3"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.643652 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.645287 4812 patch_prober.go:28] interesting pod/console-operator-58897d9998-splm8 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.645352 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-splm8" podUID="34cd8aa7-5566-4824-8c72-10438437ef94" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.655287 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-r74vx" podStartSLOduration=123.655266866 
podStartE2EDuration="2m3.655266866s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.652781908 +0000 UTC m=+146.492924003" watchObservedRunningTime="2025-11-25 16:49:31.655266866 +0000 UTC m=+146.495408961" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.674401 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" event={"ID":"3b01e1d1-14fa-4594-b52e-07b377965f5e","Type":"ContainerStarted","Data":"47b68375186c6560fa0306f285b63edd81ee81d05859073193c5e205b8fe383b"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.674458 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" event={"ID":"3b01e1d1-14fa-4594-b52e-07b377965f5e","Type":"ContainerStarted","Data":"4d7386de15577a92a83bea3f6a951a028cb54d37e5f7be1c153f0125f5b3f0ac"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.677871 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" event={"ID":"3da5ea57-7db8-4799-b1eb-b18e1e9185ef","Type":"ContainerStarted","Data":"3a87718418e59b5fbd0818eea3a88121144c6ca526219d3a4b6455fbfdfac90a"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.709810 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-wmngc" podStartSLOduration=123.709796595 podStartE2EDuration="2m3.709796595s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.709222167 +0000 UTC m=+146.549364272" watchObservedRunningTime="2025-11-25 16:49:31.709796595 +0000 UTC m=+146.549938690" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.721463 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.722885 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.222870812 +0000 UTC m=+147.063012907 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.725768 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-bwjqq" event={"ID":"8eab44d8-62dd-4507-9ffc-cb571fe12289","Type":"ContainerStarted","Data":"54f8d7f9962e5814b37c94456ec09fbdbbe6d6db1eff7af5a72bd981e0f99b7b"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.758915 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-splm8" podStartSLOduration=124.758900665 podStartE2EDuration="2m4.758900665s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.756449569 +0000 UTC m=+146.596591664" watchObservedRunningTime="2025-11-25 16:49:31.758900665 +0000 UTC m=+146.599042760" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.790355 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-bwjqq" podStartSLOduration=6.790335724 podStartE2EDuration="6.790335724s" podCreationTimestamp="2025-11-25 16:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.789842199 +0000 UTC m=+146.629984294" watchObservedRunningTime="2025-11-25 16:49:31.790335724 +0000 UTC m=+146.630477819" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.791233 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" event={"ID":"96e9ec11-59a4-42e6-b0e1-306160e54c23","Type":"ContainerStarted","Data":"49e78c32fbf1492289096791f36f0ee59f42fa88389e776d57cda260874b54c0"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.791273 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" event={"ID":"96e9ec11-59a4-42e6-b0e1-306160e54c23","Type":"ContainerStarted","Data":"e3bfa7a8258f73f6f811a051a94a5e8226f15abf15ac4cacd2daaae498cf1cf0"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.792084 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.793291 4812 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-kxr75 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" start-of-body= Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.793330 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" podUID="96e9ec11-59a4-42e6-b0e1-306160e54c23" containerName="packageserver" probeResult="failure" output="Get 
\"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.805403 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" event={"ID":"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a","Type":"ContainerStarted","Data":"2a9abc351de182e8a02cf3161cd15da24d93d1da8d2438474cbc191fadefd43b"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.805455 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" event={"ID":"e3cdc2a6-f0b4-4b82-941d-3984c4ba8d2a","Type":"ContainerStarted","Data":"4401fdd1da618ce08a4ed224e3553b4dece889c91df8cb0895ceacc817875dda"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.814975 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" event={"ID":"10eaf74e-432a-4036-8d38-531569a99ac9","Type":"ContainerStarted","Data":"fb5ec402d13d48ba352478a688111a1e3cdddea762e7161b1508310fb6fa5673"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.815826 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.820993 4812 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-sqpll container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.821065 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" podUID="10eaf74e-432a-4036-8d38-531569a99ac9" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.822220 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.822361 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.322336962 +0000 UTC m=+147.162479057 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.822496 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.842452 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.342432648 +0000 UTC m=+147.182574743 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.885816 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" event={"ID":"6ae73445-81df-49ec-9c77-da00d65eef40","Type":"ContainerStarted","Data":"320971ed4b41217e6abefc5b08531d263297769ef00465c8bcf016826634c53c"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.885856 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-xv59p" event={"ID":"b297b670-7982-4cd1-8d8e-b1a3cda0dce1","Type":"ContainerStarted","Data":"e2519abc782afd7647b0f60f8ea10878701df068a4d3053c52b9b77f26a6e511"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.887284 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" event={"ID":"a4485315-f2bc-47be-80a6-26508fa8719f","Type":"ContainerStarted","Data":"356daa7a670e8b17afe623b956ef6adb33f8ae4d3e84753336aa91b757fc6993"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.887311 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" event={"ID":"a4485315-f2bc-47be-80a6-26508fa8719f","Type":"ContainerStarted","Data":"b918ae72c79b6ae66860e140fdeb0c2a2d400527b6cce1a3c34fa2d19bf80989"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.887773 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.889105 4812 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8p5fc container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get 
\"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.889140 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" podUID="a4485315-f2bc-47be-80a6-26508fa8719f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.900649 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-5hrtq" podStartSLOduration=123.900634462 podStartE2EDuration="2m3.900634462s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.845387921 +0000 UTC m=+146.685530026" watchObservedRunningTime="2025-11-25 16:49:31.900634462 +0000 UTC m=+146.740776557" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.901542 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-x9d4d" podStartSLOduration=123.90152204 podStartE2EDuration="2m3.90152204s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.899085514 +0000 UTC m=+146.739227599" watchObservedRunningTime="2025-11-25 16:49:31.90152204 +0000 UTC m=+146.741664135" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.919365 4812 generic.go:334] "Generic (PLEG): container finished" podID="f93960d1-f97a-4d2d-9e04-edb082755a9a" containerID="0b56fab305517aaef14421389b93703b023a715bf7a78f72ed11adf32ca17dee" exitCode=0 Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.919459 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" event={"ID":"f93960d1-f97a-4d2d-9e04-edb082755a9a","Type":"ContainerDied","Data":"0b56fab305517aaef14421389b93703b023a715bf7a78f72ed11adf32ca17dee"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.924083 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:31 crc kubenswrapper[4812]: E1125 16:49:31.925188 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.425160777 +0000 UTC m=+147.265302872 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.944922 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" podStartSLOduration=123.944906892 podStartE2EDuration="2m3.944906892s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.942885458 +0000 UTC m=+146.783027563" watchObservedRunningTime="2025-11-25 16:49:31.944906892 +0000 UTC m=+146.785048987" Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.949857 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" event={"ID":"e9cf419e-cde9-4a86-a206-24ac78d1e475","Type":"ContainerStarted","Data":"72b131e3d262087da75973048e675fa65e93aab0f26f79696e8de11dd857b287"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.949908 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" event={"ID":"e9cf419e-cde9-4a86-a206-24ac78d1e475","Type":"ContainerStarted","Data":"dafda0faf45b18850d12726033d066aeeadd29ae52d8f86cc165f68706d43ff0"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.966317 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" event={"ID":"d7bb301b-f2a3-4526-b953-d6aa12d8621c","Type":"ContainerStarted","Data":"fa3b820d94a17c4fda80f9eb120eb5aec8cf4312f89c01e688ac8881128a851f"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.966390 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" event={"ID":"d7bb301b-f2a3-4526-b953-d6aa12d8621c","Type":"ContainerStarted","Data":"b3672e31e01e94a18edf322a8cd41f6835fdd4f1116785390e7b20ed43561605"} Nov 25 16:49:31 crc kubenswrapper[4812]: I1125 16:49:31.991172 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" podStartSLOduration=123.991148243 podStartE2EDuration="2m3.991148243s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:31.988243912 +0000 UTC m=+146.828386007" watchObservedRunningTime="2025-11-25 16:49:31.991148243 +0000 UTC m=+146.831290338" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.001691 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-m57d7" event={"ID":"a559cd93-6cab-452d-8ff4-abbc835343bb","Type":"ContainerStarted","Data":"17c3f6f97faaf9f4fcf9b49bac900d59586a6dd2958c343d1917efbc661ee1ed"} Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.001747 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-m57d7" 
event={"ID":"a559cd93-6cab-452d-8ff4-abbc835343bb","Type":"ContainerStarted","Data":"e614add56f9592ea93fe463801c9df18215de7ed994310addd913ff4c102ccf6"} Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.021743 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" event={"ID":"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f","Type":"ContainerStarted","Data":"828e490e01c1fefa4a6d51c77544d5751840387ea9337ce1f14efcd55a5f0066"} Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.022130 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" event={"ID":"13085b2e-8c8e-4023-b86b-99dbfe7c7b5f","Type":"ContainerStarted","Data":"40b078b916dac53ad6cee430e0ccd6abaf0f42dae952ecbb475f6035758d6b05"} Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.023113 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.023168 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.025205 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.026165 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.526148264 +0000 UTC m=+147.366290439 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.036006 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5gv9p" podStartSLOduration=124.03597689 podStartE2EDuration="2m4.03597689s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:32.02156132 +0000 UTC m=+146.861703415" watchObservedRunningTime="2025-11-25 16:49:32.03597689 +0000 UTC m=+146.876118985" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.060900 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.122244 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" podStartSLOduration=124.122229187 podStartE2EDuration="2m4.122229187s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:32.119517812 +0000 UTC m=+146.959659907" watchObservedRunningTime="2025-11-25 16:49:32.122229187 +0000 UTC m=+146.962371282" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.125987 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.127561 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.627511512 +0000 UTC m=+147.467653607 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.176009 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" podStartSLOduration=125.175979703 podStartE2EDuration="2m5.175979703s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:32.154260145 +0000 UTC m=+146.994402240" watchObservedRunningTime="2025-11-25 16:49:32.175979703 +0000 UTC m=+147.016121818" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.200307 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-m57d7" podStartSLOduration=7.20028436 podStartE2EDuration="7.20028436s" podCreationTimestamp="2025-11-25 16:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:32.196495961 +0000 UTC m=+147.036638056" watchObservedRunningTime="2025-11-25 16:49:32.20028436 +0000 UTC m=+147.040426455" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.227588 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.227906 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.72789411 +0000 UTC m=+147.568036205 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.281387 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5fdp" podStartSLOduration=124.281365446 podStartE2EDuration="2m4.281365446s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:32.242937739 +0000 UTC m=+147.083079844" watchObservedRunningTime="2025-11-25 16:49:32.281365446 +0000 UTC m=+147.121507541" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.328272 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.328655 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.82864094 +0000 UTC m=+147.668783035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.430237 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.430650 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:32.930632208 +0000 UTC m=+147.770774303 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.531056 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.531230 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.031205411 +0000 UTC m=+147.871347506 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.531320 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.531654 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.031641136 +0000 UTC m=+147.871783231 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.617820 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:32 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:32 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:32 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.617890 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.632804 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.632984 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.132958083 +0000 UTC m=+147.973100168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.633119 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.633454 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.133442517 +0000 UTC m=+147.973584612 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.656426 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-64j56" Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.733924 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.734131 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.234099734 +0000 UTC m=+148.074241829 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.734295 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.734649 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.234634342 +0000 UTC m=+148.074776437 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.836698 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.837834 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.337817636 +0000 UTC m=+148.177959731 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:32 crc kubenswrapper[4812]: I1125 16:49:32.940320 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:32 crc kubenswrapper[4812]: E1125 16:49:32.940743 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.440728284 +0000 UTC m=+148.280870379 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.035036 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" event={"ID":"f93960d1-f97a-4d2d-9e04-edb082755a9a","Type":"ContainerStarted","Data":"6f92db1de198c4e707baa08f9544468e986127a713d1553230f4f1c3b6175cff"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.039746 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" event={"ID":"6ae73445-81df-49ec-9c77-da00d65eef40","Type":"ContainerStarted","Data":"b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.041024 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.041811 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.042058 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.54203538 +0000 UTC m=+148.382177475 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.042895 4812 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-dgwd6 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.042936 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" podUID="6ae73445-81df-49ec-9c77-da00d65eef40" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.045374 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" event={"ID":"5cca6293-58b3-40fc-939d-f43aa66a57e7","Type":"ContainerStarted","Data":"9d69582d83db12493f488a011e4c34229df5d1f99e129704946a06f0abc0809a"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.047623 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" event={"ID":"84c147c9-ea21-4479-8f78-0f12faa15a2e","Type":"ContainerStarted","Data":"fad49bd660c5c75196038fdb8cd20a5a6c6a489107fbdf348e9d3a222dba607e"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.048979 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" event={"ID":"29ba287e-6a40-474a-88ed-c3cd0c9657b4","Type":"ContainerStarted","Data":"f7292277225ca50d4a0394b401a15173463ef0ce17f7119d055eed5e416c84ce"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.050839 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-xv59p" event={"ID":"b297b670-7982-4cd1-8d8e-b1a3cda0dce1","Type":"ContainerStarted","Data":"6c71873e24cee2fa8ccc8e8866e2ecc2aa0984e11615407e86092f99d621a60a"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.050913 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-xv59p" event={"ID":"b297b670-7982-4cd1-8d8e-b1a3cda0dce1","Type":"ContainerStarted","Data":"2939c0b48eb1bedcc0cc727dc63bfd7c4d99df32f2f288ba2c52539a9abad0d8"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.051025 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.055085 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" event={"ID":"f030671f-5121-4cb4-8163-5c65444c1896","Type":"ContainerStarted","Data":"b5449afa7785b66dfb0a930e9d1679533fc688f852174ac53431b57fdb9983da"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.058108 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" event={"ID":"b1a50fff-1913-47bd-be58-05284eb4e40e","Type":"ContainerStarted","Data":"f891352dccaa3f8e390525509b44323e659262484d00152c639e5c4a91a505ea"} Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.058149 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.060592 4812 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-kxr75 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" start-of-body= Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.060644 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75" podUID="96e9ec11-59a4-42e6-b0e1-306160e54c23" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.061223 4812 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-8p5fc container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.061253 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" podUID="a4485315-f2bc-47be-80a6-26508fa8719f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.062586 4812 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-v7lp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/healthz\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.062629 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.20:8080/healthz\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.063912 4812 patch_prober.go:28] interesting pod/console-operator-58897d9998-splm8 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.063937 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-splm8" podUID="34cd8aa7-5566-4824-8c72-10438437ef94" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.083723 4812 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" podStartSLOduration=125.083705389 podStartE2EDuration="2m5.083705389s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.081863252 +0000 UTC m=+147.922005347" watchObservedRunningTime="2025-11-25 16:49:33.083705389 +0000 UTC m=+147.923847484" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.093810 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sqpll" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.119700 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-xv59p" podStartSLOduration=8.11967899 podStartE2EDuration="8.11967899s" podCreationTimestamp="2025-11-25 16:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.118154923 +0000 UTC m=+147.958297018" watchObservedRunningTime="2025-11-25 16:49:33.11967899 +0000 UTC m=+147.959821085" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.141912 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-hw9bx" podStartSLOduration=125.141892533 podStartE2EDuration="2m5.141892533s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.141299684 +0000 UTC m=+147.981441779" watchObservedRunningTime="2025-11-25 16:49:33.141892533 +0000 UTC m=+147.982034628" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.144773 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.148156 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.648136067 +0000 UTC m=+148.488278162 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.179988 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-fc6mw" podStartSLOduration=125.179955939 podStartE2EDuration="2m5.179955939s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.178775872 +0000 UTC m=+148.018917967" watchObservedRunningTime="2025-11-25 16:49:33.179955939 +0000 UTC m=+148.020098034" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.246970 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.247169 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.747141332 +0000 UTC m=+148.587283427 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.247269 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.247601 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.747588416 +0000 UTC m=+148.587730511 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.267495 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk" podStartSLOduration=125.267470475 podStartE2EDuration="2m5.267470475s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.223295139 +0000 UTC m=+148.063437234" watchObservedRunningTime="2025-11-25 16:49:33.267470475 +0000 UTC m=+148.107612570" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.293581 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7cvbf" podStartSLOduration=125.293565679 podStartE2EDuration="2m5.293565679s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.291840605 +0000 UTC m=+148.131982700" watchObservedRunningTime="2025-11-25 16:49:33.293565679 +0000 UTC m=+148.133707774" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.294765 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" podStartSLOduration=125.294759906 podStartE2EDuration="2m5.294759906s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.269822289 +0000 UTC m=+148.109964394" watchObservedRunningTime="2025-11-25 16:49:33.294759906 +0000 UTC m=+148.134902001" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.322707 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-pt5mz" podStartSLOduration=125.322674486 podStartE2EDuration="2m5.322674486s" podCreationTimestamp="2025-11-25 16:47:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:33.321638904 +0000 UTC m=+148.161780999" watchObservedRunningTime="2025-11-25 16:49:33.322674486 +0000 UTC m=+148.162816581" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.348849 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.349111 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.849068259 +0000 UTC m=+148.689210354 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.349347 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.349719 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.849704948 +0000 UTC m=+148.689847043 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.450921 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.451554 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:33.951519231 +0000 UTC m=+148.791661316 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.552571 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.553127 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.053109666 +0000 UTC m=+148.893251761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.615943 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:33 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:33 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:33 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.616023 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.653831 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.654080 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.654127 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" 
(UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.654671 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.154642461 +0000 UTC m=+148.994784556 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.658611 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.662945 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.755114 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.755798 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.755987 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.756290 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" 
failed. No retries permitted until 2025-11-25 16:49:34.256270727 +0000 UTC m=+149.096412822 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.760522 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.762605 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.850794 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.857595 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.857908 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.357840203 +0000 UTC m=+149.197982438 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.861461 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.872883 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 16:49:33 crc kubenswrapper[4812]: I1125 16:49:33.959340 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:33 crc kubenswrapper[4812]: E1125 16:49:33.959695 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.459682137 +0000 UTC m=+149.299824222 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.060105 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.060377 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.560327073 +0000 UTC m=+149.400469178 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.060523 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.061068 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.561047215 +0000 UTC m=+149.401189310 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.086925 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" event={"ID":"f93960d1-f97a-4d2d-9e04-edb082755a9a","Type":"ContainerStarted","Data":"6132f927e60c057056ca036f647b421e405916aa14bbdc9abbed680a47fb8e14"} Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.089171 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" event={"ID":"3da5ea57-7db8-4799-b1eb-b18e1e9185ef","Type":"ContainerStarted","Data":"ad38e668ce3028c1dde511200e4d782ae477be162f47f031467682c50ee98024"} Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.089916 4812 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-v7lp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.20:8080/healthz\": dial tcp 10.217.0.20:8080: connect: connection refused" start-of-body= Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.089967 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.20:8080/healthz\": dial tcp 10.217.0.20:8080: connect: connection refused" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.137290 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" podStartSLOduration=127.13727086 podStartE2EDuration="2m7.13727086s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:34.131748289 +0000 UTC m=+148.971890394" watchObservedRunningTime="2025-11-25 16:49:34.13727086 +0000 UTC m=+148.977412955" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.161739 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.162141 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.662126125 +0000 UTC m=+149.502268220 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.252132 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-kxr75"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.268856 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.275068 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.775051094 +0000 UTC m=+149.615193189 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.372223 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.372631 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.872616155 +0000 UTC m=+149.712758250 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.475815 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.476124 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:34.976112899 +0000 UTC m=+149.816254994 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.582133 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.582595 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.082573267 +0000 UTC m=+149.922715362 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.589323 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-whrrz"]
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.596416 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.602139 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-whrrz"]
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.602525 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.626165 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 16:49:34 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld
Nov 25 16:49:34 crc kubenswrapper[4812]: [+]process-running ok
Nov 25 16:49:34 crc kubenswrapper[4812]: healthz check failed
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.626315 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.684225 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ms8g\" (UniqueName: \"kubernetes.io/projected/17f59f39-a958-4cb4-8a6a-679e7f08a13b-kube-api-access-9ms8g\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.684642 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.684690 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-catalog-content\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.684723 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-utilities\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.685060 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.1850447 +0000 UTC m=+150.025186805 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.785856 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.786064 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.286029067 +0000 UTC m=+150.126171172 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.786137 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.786200 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-catalog-content\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.786235 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-utilities\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.786280 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ms8g\" (UniqueName: \"kubernetes.io/projected/17f59f39-a958-4cb4-8a6a-679e7f08a13b-kube-api-access-9ms8g\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.786858 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.286847052 +0000 UTC m=+150.126989147 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.286847052 +0000 UTC m=+150.126989147 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.787029 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-catalog-content\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.787296 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-utilities\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.792919 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-49nlv"] Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.801217 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.812986 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.822628 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-49nlv"] Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.824717 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ms8g\" (UniqueName: \"kubernetes.io/projected/17f59f39-a958-4cb4-8a6a-679e7f08a13b-kube-api-access-9ms8g\") pod \"certified-operators-whrrz\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.842474 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-splm8" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.911772 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.912193 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-catalog-content\") pod \"community-operators-49nlv\" (UID: 
\"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.912289 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6jjg\" (UniqueName: \"kubernetes.io/projected/c1f1e583-20dd-4501-bb66-a4ee8239367a-kube-api-access-x6jjg\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.912396 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-utilities\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:34 crc kubenswrapper[4812]: E1125 16:49:34.912586 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.41256161 +0000 UTC m=+150.252703705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.970774 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:49:34 crc kubenswrapper[4812]: I1125 16:49:34.986318 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fgmnd"] Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.015336 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.015407 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-utilities\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.015435 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-catalog-content\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.015492 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6jjg\" (UniqueName: \"kubernetes.io/projected/c1f1e583-20dd-4501-bb66-a4ee8239367a-kube-api-access-x6jjg\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.016079 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.516057535 +0000 UTC m=+150.356199700 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.016198 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-utilities\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.019882 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-catalog-content\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.021985 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fgmnd"] Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.022108 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.037452 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.050523 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6jjg\" (UniqueName: \"kubernetes.io/projected/c1f1e583-20dd-4501-bb66-a4ee8239367a-kube-api-access-x6jjg\") pod \"community-operators-49nlv\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.117764 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.118381 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.618352083 +0000 UTC m=+150.458494178 (durationBeforeRetry 500ms). 
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.191866 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"25cdbe350b1a48fbf6b9e86ec68848e305f6a391a9b61c95623750ada1d8c9bc"}
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.192203 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"835c0d0b2058e4a7e145dd082d07164943a7a27f27973d1e2e1ff6d21846e9d9"}
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.205144 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mdzhn"]
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.214645 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.216413 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49nlv"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.224070 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.224165 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-utilities\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.224252 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-catalog-content\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.225674 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.725659587 +0000 UTC m=+150.565801682 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.226492 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wff8\" (UniqueName: \"kubernetes.io/projected/906a6b44-74ed-468a-8519-c269d04cf34b-kube-api-access-2wff8\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.242791 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b93b4436b71f1bc77e05b100975f53f600cf68f79a03694ddd1e05ff6f610220"}
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.266413 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mdzhn"]
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.281895 4812 generic.go:334] "Generic (PLEG): container finished" podID="d7bb301b-f2a3-4526-b953-d6aa12d8621c" containerID="fa3b820d94a17c4fda80f9eb120eb5aec8cf4312f89c01e688ac8881128a851f" exitCode=0
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.282193 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" event={"ID":"d7bb301b-f2a3-4526-b953-d6aa12d8621c","Type":"ContainerDied","Data":"fa3b820d94a17c4fda80f9eb120eb5aec8cf4312f89c01e688ac8881128a851f"}
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.303284 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f4d99a8a349dd1640bd585e158cac5f4781ce3e8d217b2f5c6b49c56659c5899"}
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.332728 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.333230 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-utilities\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.333277 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glz4p\" (UniqueName: \"kubernetes.io/projected/56af28bf-996b-442f-9caa-87f498b292cf-kube-api-access-glz4p\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.333343 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.333383 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-utilities\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.333408 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-catalog-content\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.333453 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wff8\" (UniqueName: \"kubernetes.io/projected/906a6b44-74ed-468a-8519-c269d04cf34b-kube-api-access-2wff8\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.335546 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.835500599 +0000 UTC m=+150.675642744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.335711 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-utilities\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.338937 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-catalog-content\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.380951 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wff8\" (UniqueName: \"kubernetes.io/projected/906a6b44-74ed-468a-8519-c269d04cf34b-kube-api-access-2wff8\") pod \"certified-operators-fgmnd\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.400059 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fgmnd"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.439717 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.439788 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-utilities\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.439812 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glz4p\" (UniqueName: \"kubernetes.io/projected/56af28bf-996b-442f-9caa-87f498b292cf-kube-api-access-glz4p\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.439843 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.440259 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn"
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.440522 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:35.940510982 +0000 UTC m=+150.780653077 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.440877 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-utilities\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.484775 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glz4p\" (UniqueName: \"kubernetes.io/projected/56af28bf-996b-442f-9caa-87f498b292cf-kube-api-access-glz4p\") pod \"community-operators-mdzhn\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.510007 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-whrrz"] Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.542462 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.543030 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.043010606 +0000 UTC m=+150.883152701 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.582865 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.627018 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:35 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:35 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:35 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.627077 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.645199 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.645508 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.14549727 +0000 UTC m=+150.985639365 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.707105 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-49nlv"] Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.747385 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.747897 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.24786546 +0000 UTC m=+151.088007555 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.851418 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.852108 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.352085177 +0000 UTC m=+151.192227272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.924257 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fgmnd"] Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.954856 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.45483341 +0000 UTC m=+151.294975515 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.954660 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:35 crc kubenswrapper[4812]: I1125 16:49:35.955671 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:35 crc kubenswrapper[4812]: E1125 16:49:35.956139 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.45611936 +0000 UTC m=+151.296261465 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.058675 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.058935 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.558901452 +0000 UTC m=+151.399043557 (durationBeforeRetry 500ms). 
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.059042 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.059447 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.559434559 +0000 UTC m=+151.399576654 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.059860 4812 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.160245 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.160682 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.660641213 +0000 UTC m=+151.500783308 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.161006 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.161484 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.661474108 +0000 UTC m=+151.501616213 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.229832 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mdzhn"]
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.262592 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.262718 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.762694582 +0000 UTC m=+151.602836677 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.263142 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.263502 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.763489868 +0000 UTC m=+151.603631963 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.353557 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e261167dae6bd5f45d91980d4308107839f6e5a6dc6ee5292b644280eff5a5bf"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.358885 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdzhn" event={"ID":"56af28bf-996b-442f-9caa-87f498b292cf","Type":"ContainerStarted","Data":"07b3f31507f65edb3caec1f6c2d41b3021b755d380782b444d203e7a6459f232"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.360561 4812 generic.go:334] "Generic (PLEG): container finished" podID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerID="1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad" exitCode=0
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.360927 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-whrrz" event={"ID":"17f59f39-a958-4cb4-8a6a-679e7f08a13b","Type":"ContainerDied","Data":"1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.360965 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-whrrz" event={"ID":"17f59f39-a958-4cb4-8a6a-679e7f08a13b","Type":"ContainerStarted","Data":"d6c55dcc7da5e155552e9706b789addefa5aad14264b0e66046c202c15d8bdc9"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.362590 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.363023 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"7bd7f46f3827756196a819f5aec5930b69bc49de9f0a89a8c2c836d2b30b10ed"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.363056 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.363762 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.363890 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.863850055 +0000 UTC m=+151.703992150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.364287 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.364680 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.86466982 +0000 UTC m=+151.704811915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.364790 4812 generic.go:334] "Generic (PLEG): container finished" podID="906a6b44-74ed-468a-8519-c269d04cf34b" containerID="8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80" exitCode=0
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.364874 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgmnd" event={"ID":"906a6b44-74ed-468a-8519-c269d04cf34b","Type":"ContainerDied","Data":"8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.364903 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgmnd" event={"ID":"906a6b44-74ed-468a-8519-c269d04cf34b","Type":"ContainerStarted","Data":"2b2256c5778bf115395fb75aa48c56a4f3840b521e487ee8cae4209855a81abd"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.365983 4812 generic.go:334] "Generic (PLEG): container finished" podID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerID="462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c" exitCode=0
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.366039 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerDied","Data":"462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.366061 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerStarted","Data":"b060737aaaed9ca88cbd9871a4172a7439a7b3daf621eddf0114d45a05d0089b"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.385415 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" event={"ID":"3da5ea57-7db8-4799-b1eb-b18e1e9185ef","Type":"ContainerStarted","Data":"4bc0e186dc262a2597b76e2cddde2b3a6b229ce794f8f178e96e0feadd71598b"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.385464 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" event={"ID":"3da5ea57-7db8-4799-b1eb-b18e1e9185ef","Type":"ContainerStarted","Data":"6658b3ff42005ceb23f666ab8f8ffdee4497619b3a05d6e589c913fcb78fe4f8"}
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.465627 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.466446 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:36.966419241 +0000 UTC m=+151.806561336 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.567719 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.567993 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:37.067979277 +0000 UTC m=+151.908121372 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.612706 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 16:49:36 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld
Nov 25 16:49:36 crc kubenswrapper[4812]: [+]process-running ok
Nov 25 16:49:36 crc kubenswrapper[4812]: healthz check failed
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.612799 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.647288 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.673295 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d7bb301b-f2a3-4526-b953-d6aa12d8621c-secret-volume\") pod \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.673376 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjqx4\" (UniqueName: \"kubernetes.io/projected/d7bb301b-f2a3-4526-b953-d6aa12d8621c-kube-api-access-vjqx4\") pod \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.673497 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.673519 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d7bb301b-f2a3-4526-b953-d6aa12d8621c-config-volume\") pod \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\" (UID: \"d7bb301b-f2a3-4526-b953-d6aa12d8621c\") "
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.676616 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:37.176585661 +0000 UTC m=+152.016727756 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.677101 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7bb301b-f2a3-4526-b953-d6aa12d8621c-config-volume" (OuterVolumeSpecName: "config-volume") pod "d7bb301b-f2a3-4526-b953-d6aa12d8621c" (UID: "d7bb301b-f2a3-4526-b953-d6aa12d8621c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.682076 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7bb301b-f2a3-4526-b953-d6aa12d8621c-kube-api-access-vjqx4" (OuterVolumeSpecName: "kube-api-access-vjqx4") pod "d7bb301b-f2a3-4526-b953-d6aa12d8621c" (UID: "d7bb301b-f2a3-4526-b953-d6aa12d8621c"). InnerVolumeSpecName "kube-api-access-vjqx4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.682130 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7bb301b-f2a3-4526-b953-d6aa12d8621c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d7bb301b-f2a3-4526-b953-d6aa12d8621c" (UID: "d7bb301b-f2a3-4526-b953-d6aa12d8621c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.703197 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56af28bf_996b_442f_9caa_87f498b292cf.slice/crio-e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56af28bf_996b_442f_9caa_87f498b292cf.slice/crio-conmon-e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.775550 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.775655 4812 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d7bb301b-f2a3-4526-b953-d6aa12d8621c-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.775669 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjqx4\" (UniqueName: \"kubernetes.io/projected/d7bb301b-f2a3-4526-b953-d6aa12d8621c-kube-api-access-vjqx4\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.775681 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d7bb301b-f2a3-4526-b953-d6aa12d8621c-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.775932 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:37.275920116 +0000 UTC m=+152.116062211 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.781561 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jwjxx"] Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.781863 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7bb301b-f2a3-4526-b953-d6aa12d8621c" containerName="collect-profiles" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.781885 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7bb301b-f2a3-4526-b953-d6aa12d8621c" containerName="collect-profiles" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.782010 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7bb301b-f2a3-4526-b953-d6aa12d8621c" containerName="collect-profiles" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.783011 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.787094 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.793315 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jwjxx"] Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.876427 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.876660 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:37.376635065 +0000 UTC m=+152.216777170 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.876702 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-utilities\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.876769 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-catalog-content\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.876853 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.877084 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6fqs\" (UniqueName: \"kubernetes.io/projected/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-kube-api-access-m6fqs\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.877179 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 16:49:37.37714514 +0000 UTC m=+152.217287235 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-h6c4h" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.978109 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:36 crc kubenswrapper[4812]: E1125 16:49:36.978591 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 16:49:37.478470458 +0000 UTC m=+152.318612553 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.978968 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6fqs\" (UniqueName: \"kubernetes.io/projected/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-kube-api-access-m6fqs\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.979262 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-utilities\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.979339 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-catalog-content\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.979993 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-utilities\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.980131 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-catalog-content\") pod \"redhat-marketplace-jwjxx\" (UID: 
\"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.985936 4812 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T16:49:36.059873893Z","Handler":null,"Name":""} Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.988608 4812 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 25 16:49:36 crc kubenswrapper[4812]: I1125 16:49:36.988647 4812 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.002721 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6fqs\" (UniqueName: \"kubernetes.io/projected/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-kube-api-access-m6fqs\") pod \"redhat-marketplace-jwjxx\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.080459 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.084502 4812 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.084561 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.099849 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.110709 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-h6c4h\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") " pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.184857 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.191448 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zn9qf"] Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.194151 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.203131 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.204114 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zn9qf"] Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.286513 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qr9s\" (UniqueName: \"kubernetes.io/projected/20e948a0-16c2-48f7-a9ec-a70685feda00-kube-api-access-2qr9s\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.286570 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-utilities\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.286602 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-catalog-content\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.295864 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.296149 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.297618 4812 patch_prober.go:28] interesting pod/console-f9d7485db-nj6w8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.297682 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-nj6w8" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerName="console" probeResult="failure" output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.376703 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.387813 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-catalog-content\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.388075 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qr9s\" (UniqueName: \"kubernetes.io/projected/20e948a0-16c2-48f7-a9ec-a70685feda00-kube-api-access-2qr9s\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.388111 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-utilities\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.389157 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-catalog-content\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.389171 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-utilities\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.400779 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" event={"ID":"3da5ea57-7db8-4799-b1eb-b18e1e9185ef","Type":"ContainerStarted","Data":"7d412174fe1d1b7fb7602f7c812b969ad8eaeae2187d6affbff3fb9a38d0f066"} Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.411157 4812 generic.go:334] "Generic (PLEG): container finished" podID="56af28bf-996b-442f-9caa-87f498b292cf" containerID="e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a" exitCode=0 Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.411635 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qr9s\" (UniqueName: \"kubernetes.io/projected/20e948a0-16c2-48f7-a9ec-a70685feda00-kube-api-access-2qr9s\") pod \"redhat-marketplace-zn9qf\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.411655 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdzhn" event={"ID":"56af28bf-996b-442f-9caa-87f498b292cf","Type":"ContainerDied","Data":"e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a"} Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.414007 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" event={"ID":"d7bb301b-f2a3-4526-b953-d6aa12d8621c","Type":"ContainerDied","Data":"b3672e31e01e94a18edf322a8cd41f6835fdd4f1116785390e7b20ed43561605"} Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.414060 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3672e31e01e94a18edf322a8cd41f6835fdd4f1116785390e7b20ed43561605" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.414092 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.428756 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-t7x6w" podStartSLOduration=12.428738219 podStartE2EDuration="12.428738219s" podCreationTimestamp="2025-11-25 16:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:37.424116765 +0000 UTC m=+152.264258860" watchObservedRunningTime="2025-11-25 16:49:37.428738219 +0000 UTC m=+152.268880314" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.534481 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.546285 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jwjxx"] Nov 25 16:49:37 crc kubenswrapper[4812]: W1125 16:49:37.569463 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf156ba1_d2fb_45ea_bffe_4b2bdeb18d72.slice/crio-4ac84f42ad6ea108025a4a3a8cddcdf94c9261f81b485df74e4dfbb1df961b1b WatchSource:0}: Error finding container 4ac84f42ad6ea108025a4a3a8cddcdf94c9261f81b485df74e4dfbb1df961b1b: Status 404 returned error can't find the container with id 4ac84f42ad6ea108025a4a3a8cddcdf94c9261f81b485df74e4dfbb1df961b1b Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.573993 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.574030 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.574042 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.574118 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.611127 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-pxgkd" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.616276 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:37 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:37 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:37 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.616406 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.683966 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-h6c4h"] Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.760702 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.761496 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.769020 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.792868 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pfrp6"] Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.796323 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.803683 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pfrp6"] Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.806617 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.846696 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.849577 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zn9qf"] Nov 25 16:49:37 crc kubenswrapper[4812]: W1125 16:49:37.898332 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20e948a0_16c2_48f7_a9ec_a70685feda00.slice/crio-a669686fc0843e3c4a10544b8e649e115435964bcbcb5361da49c6d49000455c WatchSource:0}: Error finding container a669686fc0843e3c4a10544b8e649e115435964bcbcb5361da49c6d49000455c: Status 404 returned error can't find the container with id a669686fc0843e3c4a10544b8e649e115435964bcbcb5361da49c6d49000455c Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.986878 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:37 crc kubenswrapper[4812]: I1125 16:49:37.987067 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.000375 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-utilities\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.000470 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrxgh\" (UniqueName: \"kubernetes.io/projected/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-kube-api-access-mrxgh\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.000493 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:38 crc kubenswrapper[4812]: 
I1125 16:49:38.000498 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-catalog-content\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.102094 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-catalog-content\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.102622 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-utilities\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.103277 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-utilities\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.103351 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-catalog-content\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.104082 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrxgh\" (UniqueName: \"kubernetes.io/projected/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-kube-api-access-mrxgh\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.132418 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrxgh\" (UniqueName: \"kubernetes.io/projected/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-kube-api-access-mrxgh\") pod \"redhat-operators-pfrp6\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.148746 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.151142 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.159405 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.160702 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.160881 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.179675 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-chhxl"] Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.184463 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.225688 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-chhxl"] Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.307380 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c3946cb-ff76-48e1-a548-143540c9772c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.307493 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd85m\" (UniqueName: \"kubernetes.io/projected/85faff28-66f5-478c-a85a-5e6c26a50106-kube-api-access-pd85m\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.307552 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-utilities\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.307662 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-catalog-content\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.307794 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c3946cb-ff76-48e1-a548-143540c9772c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.408649 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd85m\" (UniqueName: \"kubernetes.io/projected/85faff28-66f5-478c-a85a-5e6c26a50106-kube-api-access-pd85m\") pod \"redhat-operators-chhxl\" (UID: 
\"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.408727 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-utilities\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.408750 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-catalog-content\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.408776 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c3946cb-ff76-48e1-a548-143540c9772c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.408801 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c3946cb-ff76-48e1-a548-143540c9772c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.409045 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c3946cb-ff76-48e1-a548-143540c9772c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.409635 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-catalog-content\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.409687 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-utilities\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.426135 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.426416 4812 generic.go:334] "Generic (PLEG): container finished" podID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerID="ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777" exitCode=0 Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.426477 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jwjxx" event={"ID":"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72","Type":"ContainerDied","Data":"ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777"} Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.426499 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jwjxx" event={"ID":"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72","Type":"ContainerStarted","Data":"4ac84f42ad6ea108025a4a3a8cddcdf94c9261f81b485df74e4dfbb1df961b1b"} Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.431796 4812 generic.go:334] "Generic (PLEG): container finished" podID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerID="ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75" exitCode=0 Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.431910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zn9qf" event={"ID":"20e948a0-16c2-48f7-a9ec-a70685feda00","Type":"ContainerDied","Data":"ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75"} Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.431943 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zn9qf" event={"ID":"20e948a0-16c2-48f7-a9ec-a70685feda00","Type":"ContainerStarted","Data":"a669686fc0843e3c4a10544b8e649e115435964bcbcb5361da49c6d49000455c"} Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.432608 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd85m\" (UniqueName: \"kubernetes.io/projected/85faff28-66f5-478c-a85a-5e6c26a50106-kube-api-access-pd85m\") pod \"redhat-operators-chhxl\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.435654 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c3946cb-ff76-48e1-a548-143540c9772c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.436010 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8p5fc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.437195 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.442930 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" event={"ID":"3c23f6ed-bcde-4571-b631-c90ce20d9348","Type":"ContainerStarted","Data":"e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e"} Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.442976 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" event={"ID":"3c23f6ed-bcde-4571-b631-c90ce20d9348","Type":"ContainerStarted","Data":"ac5047a5a8814bb6c280a6a51c0d9c55acf2cffab53c613aaf234a99ba289ba7"} Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.455982 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-rqlx6" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.461769 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-4chwd" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.481890 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.520674 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" podStartSLOduration=131.520656036 podStartE2EDuration="2m11.520656036s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:38.519492829 +0000 UTC m=+153.359634944" watchObservedRunningTime="2025-11-25 16:49:38.520656036 +0000 UTC m=+153.360798131" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.541278 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.616382 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:38 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:38 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:38 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:38 crc kubenswrapper[4812]: I1125 16:49:38.616447 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.006991 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pfrp6"] Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.066364 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.322597 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-chhxl"] Nov 25 16:49:39 crc kubenswrapper[4812]: W1125 16:49:39.345538 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85faff28_66f5_478c_a85a_5e6c26a50106.slice/crio-a23b1cde1b3f34d7d93cf81b922fa051e4162404ca87f43d7246738ef571317f WatchSource:0}: Error finding container a23b1cde1b3f34d7d93cf81b922fa051e4162404ca87f43d7246738ef571317f: Status 404 returned error can't find the container with id a23b1cde1b3f34d7d93cf81b922fa051e4162404ca87f43d7246738ef571317f Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.454704 4812 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerStarted","Data":"1c12790b30530d7d3d9766abbe361c31f9ca399242b36b593b084abc534a0357"} Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.480721 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7c3946cb-ff76-48e1-a548-143540c9772c","Type":"ContainerStarted","Data":"56cb01c989f5e34b4cf46b97c9880300b8a5c23a0b7ca7cd582e40e72f4cddfa"} Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.485033 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chhxl" event={"ID":"85faff28-66f5-478c-a85a-5e6c26a50106","Type":"ContainerStarted","Data":"a23b1cde1b3f34d7d93cf81b922fa051e4162404ca87f43d7246738ef571317f"} Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.485572 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.617214 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:39 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:39 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:39 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.617289 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.807858 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.810489 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.814736 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.815117 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.818583 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.955021 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:39 crc kubenswrapper[4812]: I1125 16:49:39.955098 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.056608 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.056714 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.056898 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.082943 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.132837 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.499598 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.505584 4812 generic.go:334] "Generic (PLEG): container finished" podID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerID="09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a" exitCode=0 Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.505700 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerDied","Data":"09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a"} Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.509295 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7c3946cb-ff76-48e1-a548-143540c9772c","Type":"ContainerStarted","Data":"1109e19989311cac2d31876610dc6cd7de0a6bdc540c293a7d8b9caace75c9e7"} Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.513138 4812 generic.go:334] "Generic (PLEG): container finished" podID="85faff28-66f5-478c-a85a-5e6c26a50106" containerID="e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c" exitCode=0 Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.513499 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chhxl" event={"ID":"85faff28-66f5-478c-a85a-5e6c26a50106","Type":"ContainerDied","Data":"e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c"} Nov 25 16:49:40 crc kubenswrapper[4812]: W1125 16:49:40.533811 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod4b8765a2_e7c2_4810_b7a8_dcca6732d618.slice/crio-84c3e17020c33e9fc260193f99bffb329b631e5b7e89e68d15e42fbafcfb9aca WatchSource:0}: Error finding container 84c3e17020c33e9fc260193f99bffb329b631e5b7e89e68d15e42fbafcfb9aca: Status 404 returned error can't find the container with id 84c3e17020c33e9fc260193f99bffb329b631e5b7e89e68d15e42fbafcfb9aca Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.614253 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:40 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:40 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:40 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:40 crc kubenswrapper[4812]: I1125 16:49:40.614946 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:41 crc kubenswrapper[4812]: I1125 16:49:41.521544 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4b8765a2-e7c2-4810-b7a8-dcca6732d618","Type":"ContainerStarted","Data":"84c3e17020c33e9fc260193f99bffb329b631e5b7e89e68d15e42fbafcfb9aca"} Nov 25 16:49:41 crc kubenswrapper[4812]: I1125 16:49:41.570139 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.570123673 podStartE2EDuration="3.570123673s" podCreationTimestamp="2025-11-25 16:49:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:49:41.567921735 +0000 UTC m=+156.408063830" watchObservedRunningTime="2025-11-25 16:49:41.570123673 +0000 UTC m=+156.410265768" Nov 25 16:49:41 crc kubenswrapper[4812]: I1125 16:49:41.611790 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:41 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:41 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:41 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:41 crc kubenswrapper[4812]: I1125 16:49:41.611845 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:42 crc kubenswrapper[4812]: I1125 16:49:42.556318 4812 generic.go:334] "Generic (PLEG): container finished" podID="7c3946cb-ff76-48e1-a548-143540c9772c" containerID="1109e19989311cac2d31876610dc6cd7de0a6bdc540c293a7d8b9caace75c9e7" exitCode=0 Nov 25 16:49:42 crc kubenswrapper[4812]: I1125 16:49:42.556426 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7c3946cb-ff76-48e1-a548-143540c9772c","Type":"ContainerDied","Data":"1109e19989311cac2d31876610dc6cd7de0a6bdc540c293a7d8b9caace75c9e7"} Nov 25 16:49:42 crc kubenswrapper[4812]: I1125 16:49:42.559165 4812 generic.go:334] "Generic (PLEG): container finished" podID="4b8765a2-e7c2-4810-b7a8-dcca6732d618" containerID="d76f9a56f414b3af36776dcd514e0903837b7edaf924996830e8d08be3893fc9" exitCode=0 Nov 25 16:49:42 crc kubenswrapper[4812]: I1125 16:49:42.559210 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4b8765a2-e7c2-4810-b7a8-dcca6732d618","Type":"ContainerDied","Data":"d76f9a56f414b3af36776dcd514e0903837b7edaf924996830e8d08be3893fc9"} Nov 25 16:49:42 crc kubenswrapper[4812]: I1125 16:49:42.614205 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:42 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:42 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:42 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:42 crc kubenswrapper[4812]: I1125 16:49:42.614259 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:43 crc kubenswrapper[4812]: I1125 16:49:43.546864 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-xv59p" Nov 25 16:49:43 crc kubenswrapper[4812]: I1125 16:49:43.615616 4812 patch_prober.go:28] interesting 
pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:43 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:43 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:43 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:43 crc kubenswrapper[4812]: I1125 16:49:43.615700 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:44 crc kubenswrapper[4812]: I1125 16:49:44.612447 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:44 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:44 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:44 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:44 crc kubenswrapper[4812]: I1125 16:49:44.612807 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:45 crc kubenswrapper[4812]: I1125 16:49:45.611574 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:45 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:45 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:45 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:45 crc kubenswrapper[4812]: I1125 16:49:45.611728 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:46 crc kubenswrapper[4812]: I1125 16:49:46.611299 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 16:49:46 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld Nov 25 16:49:46 crc kubenswrapper[4812]: [+]process-running ok Nov 25 16:49:46 crc kubenswrapper[4812]: healthz check failed Nov 25 16:49:46 crc kubenswrapper[4812]: I1125 16:49:46.611723 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.295899 4812 patch_prober.go:28] interesting pod/console-f9d7485db-nj6w8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 25 
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.295948 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-nj6w8" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerName="console" probeResult="failure" output="Get \"https://10.217.0.22:8443/health\": dial tcp 10.217.0.22:8443: connect: connection refused"
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.574967 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.575028 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.574976 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.575133 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.611983 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 16:49:47 crc kubenswrapper[4812]: [-]has-synced failed: reason withheld
Nov 25 16:49:47 crc kubenswrapper[4812]: [+]process-running ok
Nov 25 16:49:47 crc kubenswrapper[4812]: healthz check failed
Nov 25 16:49:47 crc kubenswrapper[4812]: I1125 16:49:47.612044 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 16:49:48 crc kubenswrapper[4812]: I1125 16:49:48.611111 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 16:49:48 crc kubenswrapper[4812]: [+]has-synced ok
Nov 25 16:49:48 crc kubenswrapper[4812]: [+]process-running ok
Nov 25 16:49:48 crc kubenswrapper[4812]: healthz check failed
Nov 25 16:49:48 crc kubenswrapper[4812]: I1125 16:49:48.611168 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 16:49:49 crc kubenswrapper[4812]: I1125 16:49:49.505255 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:49:49 crc kubenswrapper[4812]: I1125 16:49:49.512408 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbb57832-3993-492b-80c9-a6a61891a125-metrics-certs\") pod \"network-metrics-daemon-82fvc\" (UID: \"fbb57832-3993-492b-80c9-a6a61891a125\") " pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:49:49 crc kubenswrapper[4812]: I1125 16:49:49.544156 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-82fvc"
Nov 25 16:49:49 crc kubenswrapper[4812]: I1125 16:49:49.612842 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:49 crc kubenswrapper[4812]: I1125 16:49:49.615511 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-pxgkd"
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.264653 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.416914 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c3946cb-ff76-48e1-a548-143540c9772c-kube-api-access\") pod \"7c3946cb-ff76-48e1-a548-143540c9772c\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") "
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.417586 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c3946cb-ff76-48e1-a548-143540c9772c-kubelet-dir\") pod \"7c3946cb-ff76-48e1-a548-143540c9772c\" (UID: \"7c3946cb-ff76-48e1-a548-143540c9772c\") "
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.417850 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c3946cb-ff76-48e1-a548-143540c9772c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7c3946cb-ff76-48e1-a548-143540c9772c" (UID: "7c3946cb-ff76-48e1-a548-143540c9772c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.418132 4812 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c3946cb-ff76-48e1-a548-143540c9772c-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.421094 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c3946cb-ff76-48e1-a548-143540c9772c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7c3946cb-ff76-48e1-a548-143540c9772c" (UID: "7c3946cb-ff76-48e1-a548-143540c9772c"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.520015 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c3946cb-ff76-48e1-a548-143540c9772c-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.620439 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.620470 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"7c3946cb-ff76-48e1-a548-143540c9772c","Type":"ContainerDied","Data":"56cb01c989f5e34b4cf46b97c9880300b8a5c23a0b7ca7cd582e40e72f4cddfa"}
Nov 25 16:49:50 crc kubenswrapper[4812]: I1125 16:49:50.620546 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56cb01c989f5e34b4cf46b97c9880300b8a5c23a0b7ca7cd582e40e72f4cddfa"
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.579255 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.643093 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4b8765a2-e7c2-4810-b7a8-dcca6732d618","Type":"ContainerDied","Data":"84c3e17020c33e9fc260193f99bffb329b631e5b7e89e68d15e42fbafcfb9aca"}
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.643138 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84c3e17020c33e9fc260193f99bffb329b631e5b7e89e68d15e42fbafcfb9aca"
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.643159 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.757239 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kubelet-dir\") pod \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") "
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.757386 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kube-api-access\") pod \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\" (UID: \"4b8765a2-e7c2-4810-b7a8-dcca6732d618\") "
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.757380 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4b8765a2-e7c2-4810-b7a8-dcca6732d618" (UID: "4b8765a2-e7c2-4810-b7a8-dcca6732d618"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.761698 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4b8765a2-e7c2-4810-b7a8-dcca6732d618" (UID: "4b8765a2-e7c2-4810-b7a8-dcca6732d618"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.859511 4812 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:53 crc kubenswrapper[4812]: I1125 16:49:53.859775 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b8765a2-e7c2-4810-b7a8-dcca6732d618-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.300344 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-nj6w8"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.304744 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-nj6w8"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.333132 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.333218 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.387748 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.574459 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.574467 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.574524 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.574567 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.574615 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-dd95m"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.575054 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.575113 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.575169 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"a688835dca1f760b8cd52bda38b6b882c7a2a2e4a42bc70e13e1e854a8a9f266"} pod="openshift-console/downloads-7954f5f757-dd95m" containerMessage="Container download-server failed liveness probe, will be restarted"
Nov 25 16:49:57 crc kubenswrapper[4812]: I1125 16:49:57.575255 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" containerID="cri-o://a688835dca1f760b8cd52bda38b6b882c7a2a2e4a42bc70e13e1e854a8a9f266" gracePeriod=2
Nov 25 16:49:58 crc kubenswrapper[4812]: I1125 16:49:58.656417 4812 patch_prober.go:28] interesting pod/router-default-5444994796-pxgkd container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 16:49:58 crc kubenswrapper[4812]: I1125 16:49:58.656899 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-pxgkd" podUID="64f34f9a-3fdc-492d-a75f-93e4a3a8727f" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 16:50:01 crc kubenswrapper[4812]: I1125 16:50:01.685353 4812 generic.go:334] "Generic (PLEG): container finished" podID="135cae13-5b75-4d98-9c17-61448faddf90" containerID="a688835dca1f760b8cd52bda38b6b882c7a2a2e4a42bc70e13e1e854a8a9f266" exitCode=0
Nov 25 16:50:01 crc kubenswrapper[4812]: I1125 16:50:01.685399 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dd95m" event={"ID":"135cae13-5b75-4d98-9c17-61448faddf90","Type":"ContainerDied","Data":"a688835dca1f760b8cd52bda38b6b882c7a2a2e4a42bc70e13e1e854a8a9f266"}
Nov 25 16:50:07 crc kubenswrapper[4812]: I1125 16:50:07.575433 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:50:07 crc kubenswrapper[4812]: I1125 16:50:07.576239 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:50:08 crc kubenswrapper[4812]: I1125 16:50:08.480496 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-nkjmk"
Nov 25 16:50:14 crc kubenswrapper[4812]: I1125 16:50:14.607120 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 16:50:17 crc kubenswrapper[4812]: I1125 16:50:17.575874 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:50:17 crc kubenswrapper[4812]: I1125 16:50:17.576311 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:50:24 crc kubenswrapper[4812]: E1125 16:50:24.704238 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 25 16:50:24 crc kubenswrapper[4812]: E1125 16:50:24.704868 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9ms8g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-whrrz_openshift-marketplace(17f59f39-a958-4cb4-8a6a-679e7f08a13b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 16:50:24 crc kubenswrapper[4812]: E1125 16:50:24.706103 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-whrrz" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b"
Nov 25 16:50:27 crc kubenswrapper[4812]: I1125 16:50:27.332951 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:50:27 crc kubenswrapper[4812]: I1125 16:50:27.333225 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:50:27 crc kubenswrapper[4812]: E1125 16:50:27.376649 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-whrrz" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b"
Nov 25 16:50:27 crc kubenswrapper[4812]: I1125 16:50:27.575262 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Nov 25 16:50:27 crc kubenswrapper[4812]: I1125 16:50:27.575337 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Nov 25 16:50:28 crc kubenswrapper[4812]: E1125 16:50:28.362069 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 25 16:50:28 crc kubenswrapper[4812]: E1125 16:50:28.362279 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m6fqs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-jwjxx_openshift-marketplace(df156ba1-d2fb-45ea-bffe-4b2bdeb18d72): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 16:50:28 crc kubenswrapper[4812]: E1125 16:50:28.363584 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-jwjxx" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72"
Nov 25 16:50:32 crc kubenswrapper[4812]: E1125 16:50:32.424182 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-jwjxx" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72"
Nov 25 16:50:32 crc kubenswrapper[4812]: I1125 16:50:32.800333 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-82fvc"]
Nov 25 16:50:32 crc kubenswrapper[4812]: W1125 16:50:32.804942 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbb57832_3993_492b_80c9_a6a61891a125.slice/crio-24622c5e3516d18568f90488c84ab592882b59b267aa1abecd11c837af6bde2e WatchSource:0}: Error finding container 24622c5e3516d18568f90488c84ab592882b59b267aa1abecd11c837af6bde2e: Status 404 returned error can't find the container with id 24622c5e3516d18568f90488c84ab592882b59b267aa1abecd11c837af6bde2e
Nov 25 16:50:32 crc kubenswrapper[4812]: I1125 16:50:32.878302 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-82fvc" event={"ID":"fbb57832-3993-492b-80c9-a6a61891a125","Type":"ContainerStarted","Data":"24622c5e3516d18568f90488c84ab592882b59b267aa1abecd11c837af6bde2e"}
pod="openshift-console/downloads-7954f5f757-dd95m" event={"ID":"135cae13-5b75-4d98-9c17-61448faddf90","Type":"ContainerStarted","Data":"b74773b98861db355ff409bc88e42b05fc60b2d276186e5952bc393edd803f45"} Nov 25 16:50:34 crc kubenswrapper[4812]: E1125 16:50:34.871152 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 16:50:34 crc kubenswrapper[4812]: E1125 16:50:34.871630 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-glz4p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-mdzhn_openshift-marketplace(56af28bf-996b-442f-9caa-87f498b292cf): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 16:50:34 crc kubenswrapper[4812]: E1125 16:50:34.872882 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-mdzhn" podUID="56af28bf-996b-442f-9caa-87f498b292cf" Nov 25 16:50:34 crc kubenswrapper[4812]: I1125 16:50:34.901219 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-82fvc" event={"ID":"fbb57832-3993-492b-80c9-a6a61891a125","Type":"ContainerStarted","Data":"a0a951c1ad08451d300c6760cda487de47e960af65bf4a79d41dc79094c79e76"} Nov 25 16:50:34 crc kubenswrapper[4812]: I1125 16:50:34.901901 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:50:34 crc kubenswrapper[4812]: I1125 16:50:34.901956 4812 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:50:34 crc kubenswrapper[4812]: E1125 16:50:34.902721 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-mdzhn" podUID="56af28bf-996b-442f-9caa-87f498b292cf" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.495152 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.495979 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x6jjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-49nlv_openshift-marketplace(c1f1e583-20dd-4501-bb66-a4ee8239367a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.497178 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-49nlv" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.506181 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.506351 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2wff8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-fgmnd_openshift-marketplace(906a6b44-74ed-468a-8519-c269d04cf34b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.507526 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-fgmnd" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.573057 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.573502 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.573889 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.573733 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 
10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.574033 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.574260 4812 patch_prober.go:28] interesting pod/downloads-7954f5f757-dd95m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 25 16:50:37 crc kubenswrapper[4812]: I1125 16:50:37.574286 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dd95m" podUID="135cae13-5b75-4d98-9c17-61448faddf90" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.792878 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.793125 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2qr9s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-zn9qf_openshift-marketplace(20e948a0-16c2-48f7-a9ec-a70685feda00): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 16:50:37 crc kubenswrapper[4812]: E1125 16:50:37.794345 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc 
error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-zn9qf" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" Nov 25 16:50:43 crc kubenswrapper[4812]: E1125 16:50:43.937647 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-49nlv" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" Nov 25 16:50:43 crc kubenswrapper[4812]: E1125 16:50:43.938016 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-fgmnd" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" Nov 25 16:50:43 crc kubenswrapper[4812]: E1125 16:50:43.937562 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-zn9qf" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" Nov 25 16:50:47 crc kubenswrapper[4812]: I1125 16:50:47.583066 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-dd95m" Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.285556 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.286066 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pd85m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.286066 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pd85m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-chhxl_openshift-marketplace(85faff28-66f5-478c-a85a-5e6c26a50106): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.287273 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-chhxl" podUID="85faff28-66f5-478c-a85a-5e6c26a50106"
Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.572775 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.573386 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mrxgh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-pfrp6_openshift-marketplace(f2cd8a2b-d216-49b7-b86c-fa6b743f238d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 16:50:50 crc kubenswrapper[4812]: E1125 16:50:50.574614 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-pfrp6" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d"
Nov 25 16:50:50 crc kubenswrapper[4812]: I1125 16:50:50.997458 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-82fvc" event={"ID":"fbb57832-3993-492b-80c9-a6a61891a125","Type":"ContainerStarted","Data":"2368e29167edf6d4770e38245676cc0aef2fbad32a84d215797d7a1058d33db6"}
Nov 25 16:50:51 crc kubenswrapper[4812]: I1125 16:50:51.016957 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-82fvc" podStartSLOduration=204.016927279 podStartE2EDuration="3m24.016927279s" podCreationTimestamp="2025-11-25 16:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:50:51.011104277 +0000 UTC m=+225.851246392" watchObservedRunningTime="2025-11-25 16:50:51.016927279 +0000 UTC m=+225.857069384"
Nov 25 16:50:52 crc kubenswrapper[4812]: I1125 16:50:52.007740 4812 generic.go:334] "Generic (PLEG): container finished" podID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerID="9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478" exitCode=0
Nov 25 16:50:52 crc kubenswrapper[4812]: I1125 16:50:52.007845 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-whrrz" event={"ID":"17f59f39-a958-4cb4-8a6a-679e7f08a13b","Type":"ContainerDied","Data":"9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478"}
Nov 25 16:50:52 crc kubenswrapper[4812]: E1125 16:50:52.488886 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-pfrp6" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d"
Nov 25 16:50:53 crc kubenswrapper[4812]: I1125 16:50:53.019409 4812 generic.go:334] "Generic (PLEG): container finished" podID="56af28bf-996b-442f-9caa-87f498b292cf" containerID="030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33" exitCode=0
Nov 25 16:50:53 crc kubenswrapper[4812]: I1125 16:50:53.019459 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdzhn" event={"ID":"56af28bf-996b-442f-9caa-87f498b292cf","Type":"ContainerDied","Data":"030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33"}
Nov 25 16:50:53 crc kubenswrapper[4812]: I1125 16:50:53.024738 4812 generic.go:334] "Generic (PLEG): container finished" podID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerID="5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489" exitCode=0
Nov 25 16:50:53 crc kubenswrapper[4812]: I1125 16:50:53.024774 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jwjxx" event={"ID":"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72","Type":"ContainerDied","Data":"5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489"}
Nov 25 16:50:54 crc kubenswrapper[4812]: I1125 16:50:54.033318 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-whrrz" event={"ID":"17f59f39-a958-4cb4-8a6a-679e7f08a13b","Type":"ContainerStarted","Data":"b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19"}
Nov 25 16:50:54 crc kubenswrapper[4812]: I1125 16:50:54.052998 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-whrrz" podStartSLOduration=3.344289466 podStartE2EDuration="1m20.052976559s" podCreationTimestamp="2025-11-25 16:49:34 +0000 UTC" firstStartedPulling="2025-11-25 16:49:36.362167593 +0000 UTC m=+151.202309688" lastFinishedPulling="2025-11-25 16:50:53.070854686 +0000 UTC m=+227.910996781" observedRunningTime="2025-11-25 16:50:54.049323621 +0000 UTC m=+228.889465736" watchObservedRunningTime="2025-11-25 16:50:54.052976559 +0000 UTC m=+228.893118654"
Nov 25 16:50:54 crc kubenswrapper[4812]: I1125 16:50:54.971629 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:50:54 crc kubenswrapper[4812]: I1125 16:50:54.972293 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-whrrz"
Nov 25 16:50:55 crc kubenswrapper[4812]: I1125 16:50:55.041971 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdzhn" event={"ID":"56af28bf-996b-442f-9caa-87f498b292cf","Type":"ContainerStarted","Data":"e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3"}
Nov 25 16:50:55 crc kubenswrapper[4812]: I1125 16:50:55.045284 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jwjxx" event={"ID":"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72","Type":"ContainerStarted","Data":"5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5"}
Nov 25 16:50:55 crc kubenswrapper[4812]: I1125 16:50:55.061973 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mdzhn" podStartSLOduration=3.43606503 podStartE2EDuration="1m20.061957263s" podCreationTimestamp="2025-11-25 16:49:35 +0000 UTC" firstStartedPulling="2025-11-25 16:49:37.437196433 +0000 UTC m=+152.277338528" lastFinishedPulling="2025-11-25 16:50:54.063088666 +0000 UTC m=+228.903230761" observedRunningTime="2025-11-25 16:50:55.057399508 +0000 UTC m=+229.897541603" watchObservedRunningTime="2025-11-25 16:50:55.061957263 +0000 UTC m=+229.902099368"
Nov 25 16:50:55 crc kubenswrapper[4812]: I1125 16:50:55.079827 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jwjxx" podStartSLOduration=3.614622905 podStartE2EDuration="1m19.079809778s" podCreationTimestamp="2025-11-25 16:49:36 +0000 UTC" firstStartedPulling="2025-11-25 16:49:38.427954327 +0000 UTC m=+153.268096422" lastFinishedPulling="2025-11-25 16:50:53.89314121 +0000 UTC m=+228.733283295" observedRunningTime="2025-11-25 16:50:55.07682053 +0000 UTC m=+229.916962635" watchObservedRunningTime="2025-11-25 16:50:55.079809778 +0000 UTC m=+229.919951873"
Nov 25 16:50:55 crc kubenswrapper[4812]: I1125 16:50:55.583775 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:50:55 crc kubenswrapper[4812]: I1125 16:50:55.583843 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mdzhn"
Nov 25 16:50:56 crc kubenswrapper[4812]: I1125 16:50:56.257953 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-whrrz" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="registry-server" probeResult="failure" output=<
Nov 25 16:50:56 crc kubenswrapper[4812]: timeout: failed to connect service ":50051" within 1s
Nov 25 16:50:56 crc kubenswrapper[4812]: >
Nov 25 16:50:56 crc kubenswrapper[4812]: I1125 16:50:56.625300 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-mdzhn" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="registry-server" probeResult="failure" output=<
Nov 25 16:50:56 crc kubenswrapper[4812]: timeout: failed to connect service ":50051" within 1s
Nov 25 16:50:56 crc kubenswrapper[4812]: >
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.100184 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jwjxx"
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.101148 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jwjxx"
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.191626 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jwjxx"
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.332422 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.332503 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.332634 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx"
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.333260 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 16:50:57 crc kubenswrapper[4812]: I1125 16:50:57.333312 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6" gracePeriod=600
Nov 25 16:50:58 crc kubenswrapper[4812]: I1125 16:50:58.079024 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6" exitCode=0
Nov 25 16:50:58 crc kubenswrapper[4812]: I1125 16:50:58.079257 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6"}
Nov 25 16:50:59 crc kubenswrapper[4812]: I1125 16:50:59.085360 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerStarted","Data":"41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0"}
Nov 25 16:50:59 crc kubenswrapper[4812]:
I1125 16:50:59.087119 4812 generic.go:334] "Generic (PLEG): container finished" podID="906a6b44-74ed-468a-8519-c269d04cf34b" containerID="d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d" exitCode=0 Nov 25 16:50:59 crc kubenswrapper[4812]: I1125 16:50:59.087183 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgmnd" event={"ID":"906a6b44-74ed-468a-8519-c269d04cf34b","Type":"ContainerDied","Data":"d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d"} Nov 25 16:50:59 crc kubenswrapper[4812]: I1125 16:50:59.090731 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"6a4a8e154a6e12db11fb5ac4d932b68107c12d9d0d7b66465ae6e941c31a59a0"} Nov 25 16:50:59 crc kubenswrapper[4812]: I1125 16:50:59.140654 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:51:00 crc kubenswrapper[4812]: I1125 16:51:00.097723 4812 generic.go:334] "Generic (PLEG): container finished" podID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerID="41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0" exitCode=0 Nov 25 16:51:00 crc kubenswrapper[4812]: I1125 16:51:00.097809 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerDied","Data":"41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0"} Nov 25 16:51:01 crc kubenswrapper[4812]: I1125 16:51:01.110220 4812 generic.go:334] "Generic (PLEG): container finished" podID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerID="1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e" exitCode=0 Nov 25 16:51:01 crc kubenswrapper[4812]: I1125 16:51:01.110326 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zn9qf" event={"ID":"20e948a0-16c2-48f7-a9ec-a70685feda00","Type":"ContainerDied","Data":"1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e"} Nov 25 16:51:01 crc kubenswrapper[4812]: I1125 16:51:01.112520 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerStarted","Data":"49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675"} Nov 25 16:51:01 crc kubenswrapper[4812]: I1125 16:51:01.114432 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgmnd" event={"ID":"906a6b44-74ed-468a-8519-c269d04cf34b","Type":"ContainerStarted","Data":"cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d"} Nov 25 16:51:01 crc kubenswrapper[4812]: I1125 16:51:01.148519 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-49nlv" podStartSLOduration=2.6619855770000003 podStartE2EDuration="1m27.148496179s" podCreationTimestamp="2025-11-25 16:49:34 +0000 UTC" firstStartedPulling="2025-11-25 16:49:36.369321016 +0000 UTC m=+151.209463111" lastFinishedPulling="2025-11-25 16:51:00.855831618 +0000 UTC m=+235.695973713" observedRunningTime="2025-11-25 16:51:01.147303964 +0000 UTC m=+235.987446069" watchObservedRunningTime="2025-11-25 16:51:01.148496179 +0000 UTC m=+235.988638274" Nov 25 16:51:01 crc kubenswrapper[4812]: I1125 
16:51:01.166736 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fgmnd" podStartSLOduration=2.799110938 podStartE2EDuration="1m27.166720936s" podCreationTimestamp="2025-11-25 16:49:34 +0000 UTC" firstStartedPulling="2025-11-25 16:49:36.369844792 +0000 UTC m=+151.209986887" lastFinishedPulling="2025-11-25 16:51:00.73745479 +0000 UTC m=+235.577596885" observedRunningTime="2025-11-25 16:51:01.164296995 +0000 UTC m=+236.004439100" watchObservedRunningTime="2025-11-25 16:51:01.166720936 +0000 UTC m=+236.006863031" Nov 25 16:51:03 crc kubenswrapper[4812]: I1125 16:51:03.130818 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zn9qf" event={"ID":"20e948a0-16c2-48f7-a9ec-a70685feda00","Type":"ContainerStarted","Data":"e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0"} Nov 25 16:51:04 crc kubenswrapper[4812]: I1125 16:51:04.480649 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zn9qf" podStartSLOduration=3.8349725919999997 podStartE2EDuration="1m27.480625563s" podCreationTimestamp="2025-11-25 16:49:37 +0000 UTC" firstStartedPulling="2025-11-25 16:49:38.436117372 +0000 UTC m=+153.276259477" lastFinishedPulling="2025-11-25 16:51:02.081770353 +0000 UTC m=+236.921912448" observedRunningTime="2025-11-25 16:51:03.146774008 +0000 UTC m=+237.986916123" watchObservedRunningTime="2025-11-25 16:51:04.480625563 +0000 UTC m=+239.320767658" Nov 25 16:51:04 crc kubenswrapper[4812]: I1125 16:51:04.482607 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fjv5r"] Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.010260 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.050054 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.217180 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.217519 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.253460 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.401721 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.401777 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.439696 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.625270 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:51:05 crc kubenswrapper[4812]: I1125 16:51:05.666656 4812 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:51:06 crc kubenswrapper[4812]: I1125 16:51:06.193845 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:51:06 crc kubenswrapper[4812]: I1125 16:51:06.194363 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:51:07 crc kubenswrapper[4812]: I1125 16:51:07.535410 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:51:07 crc kubenswrapper[4812]: I1125 16:51:07.539249 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:51:07 crc kubenswrapper[4812]: I1125 16:51:07.578874 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:51:08 crc kubenswrapper[4812]: I1125 16:51:08.158602 4812 generic.go:334] "Generic (PLEG): container finished" podID="85faff28-66f5-478c-a85a-5e6c26a50106" containerID="c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9" exitCode=0 Nov 25 16:51:08 crc kubenswrapper[4812]: I1125 16:51:08.158675 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chhxl" event={"ID":"85faff28-66f5-478c-a85a-5e6c26a50106","Type":"ContainerDied","Data":"c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9"} Nov 25 16:51:08 crc kubenswrapper[4812]: I1125 16:51:08.161453 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerStarted","Data":"eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a"} Nov 25 16:51:08 crc kubenswrapper[4812]: I1125 16:51:08.208756 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.169559 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chhxl" event={"ID":"85faff28-66f5-478c-a85a-5e6c26a50106","Type":"ContainerStarted","Data":"d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf"} Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.171457 4812 generic.go:334] "Generic (PLEG): container finished" podID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerID="eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a" exitCode=0 Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.171512 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerDied","Data":"eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a"} Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.191071 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-chhxl" podStartSLOduration=4.150192473 podStartE2EDuration="1m31.191049829s" podCreationTimestamp="2025-11-25 16:49:38 +0000 UTC" firstStartedPulling="2025-11-25 16:49:41.523345156 +0000 UTC m=+156.363487241" lastFinishedPulling="2025-11-25 16:51:08.564202502 +0000 UTC m=+243.404344597" observedRunningTime="2025-11-25 16:51:09.187187665 +0000 UTC 
m=+244.027329770" watchObservedRunningTime="2025-11-25 16:51:09.191049829 +0000 UTC m=+244.031191924" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.246519 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fgmnd"] Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.246755 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fgmnd" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="registry-server" containerID="cri-o://cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d" gracePeriod=2 Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.447994 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mdzhn"] Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.448217 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mdzhn" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="registry-server" containerID="cri-o://e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3" gracePeriod=2 Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.620108 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.801703 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-utilities\") pod \"906a6b44-74ed-468a-8519-c269d04cf34b\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.802155 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-catalog-content\") pod \"906a6b44-74ed-468a-8519-c269d04cf34b\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.802217 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wff8\" (UniqueName: \"kubernetes.io/projected/906a6b44-74ed-468a-8519-c269d04cf34b-kube-api-access-2wff8\") pod \"906a6b44-74ed-468a-8519-c269d04cf34b\" (UID: \"906a6b44-74ed-468a-8519-c269d04cf34b\") " Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.802654 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-utilities" (OuterVolumeSpecName: "utilities") pod "906a6b44-74ed-468a-8519-c269d04cf34b" (UID: "906a6b44-74ed-468a-8519-c269d04cf34b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.808897 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/906a6b44-74ed-468a-8519-c269d04cf34b-kube-api-access-2wff8" (OuterVolumeSpecName: "kube-api-access-2wff8") pod "906a6b44-74ed-468a-8519-c269d04cf34b" (UID: "906a6b44-74ed-468a-8519-c269d04cf34b"). InnerVolumeSpecName "kube-api-access-2wff8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.831058 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.856142 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "906a6b44-74ed-468a-8519-c269d04cf34b" (UID: "906a6b44-74ed-468a-8519-c269d04cf34b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.904106 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.904143 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wff8\" (UniqueName: \"kubernetes.io/projected/906a6b44-74ed-468a-8519-c269d04cf34b-kube-api-access-2wff8\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:09 crc kubenswrapper[4812]: I1125 16:51:09.904158 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906a6b44-74ed-468a-8519-c269d04cf34b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.005314 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glz4p\" (UniqueName: \"kubernetes.io/projected/56af28bf-996b-442f-9caa-87f498b292cf-kube-api-access-glz4p\") pod \"56af28bf-996b-442f-9caa-87f498b292cf\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.005431 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-utilities\") pod \"56af28bf-996b-442f-9caa-87f498b292cf\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.005468 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content\") pod \"56af28bf-996b-442f-9caa-87f498b292cf\" (UID: \"56af28bf-996b-442f-9caa-87f498b292cf\") " Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.006882 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-utilities" (OuterVolumeSpecName: "utilities") pod "56af28bf-996b-442f-9caa-87f498b292cf" (UID: "56af28bf-996b-442f-9caa-87f498b292cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.008784 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56af28bf-996b-442f-9caa-87f498b292cf-kube-api-access-glz4p" (OuterVolumeSpecName: "kube-api-access-glz4p") pod "56af28bf-996b-442f-9caa-87f498b292cf" (UID: "56af28bf-996b-442f-9caa-87f498b292cf"). InnerVolumeSpecName "kube-api-access-glz4p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.057317 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "56af28bf-996b-442f-9caa-87f498b292cf" (UID: "56af28bf-996b-442f-9caa-87f498b292cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.106918 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glz4p\" (UniqueName: \"kubernetes.io/projected/56af28bf-996b-442f-9caa-87f498b292cf-kube-api-access-glz4p\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.106960 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.106973 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56af28bf-996b-442f-9caa-87f498b292cf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.182700 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerStarted","Data":"af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291"} Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.194366 4812 generic.go:334] "Generic (PLEG): container finished" podID="56af28bf-996b-442f-9caa-87f498b292cf" containerID="e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3" exitCode=0 Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.194459 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdzhn" event={"ID":"56af28bf-996b-442f-9caa-87f498b292cf","Type":"ContainerDied","Data":"e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3"} Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.194468 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mdzhn" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.194647 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdzhn" event={"ID":"56af28bf-996b-442f-9caa-87f498b292cf","Type":"ContainerDied","Data":"07b3f31507f65edb3caec1f6c2d41b3021b755d380782b444d203e7a6459f232"} Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.194680 4812 scope.go:117] "RemoveContainer" containerID="e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.197079 4812 generic.go:334] "Generic (PLEG): container finished" podID="906a6b44-74ed-468a-8519-c269d04cf34b" containerID="cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d" exitCode=0 Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.197826 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fgmnd" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.198642 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgmnd" event={"ID":"906a6b44-74ed-468a-8519-c269d04cf34b","Type":"ContainerDied","Data":"cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d"} Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.198693 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgmnd" event={"ID":"906a6b44-74ed-468a-8519-c269d04cf34b","Type":"ContainerDied","Data":"2b2256c5778bf115395fb75aa48c56a4f3840b521e487ee8cae4209855a81abd"} Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.213211 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pfrp6" podStartSLOduration=4.177669863 podStartE2EDuration="1m33.213191671s" podCreationTimestamp="2025-11-25 16:49:37 +0000 UTC" firstStartedPulling="2025-11-25 16:49:40.52481761 +0000 UTC m=+155.364959705" lastFinishedPulling="2025-11-25 16:51:09.560339418 +0000 UTC m=+244.400481513" observedRunningTime="2025-11-25 16:51:10.208159292 +0000 UTC m=+245.048301397" watchObservedRunningTime="2025-11-25 16:51:10.213191671 +0000 UTC m=+245.053333766" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.213610 4812 scope.go:117] "RemoveContainer" containerID="030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.234849 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mdzhn"] Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.241453 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mdzhn"] Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.248052 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fgmnd"] Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.251815 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fgmnd"] Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.253622 4812 scope.go:117] "RemoveContainer" containerID="e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.267689 4812 scope.go:117] "RemoveContainer" containerID="e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3" Nov 25 16:51:10 crc kubenswrapper[4812]: E1125 16:51:10.268113 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3\": container with ID starting with e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3 not found: ID does not exist" containerID="e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.268160 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3"} err="failed to get container status \"e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3\": rpc error: code = NotFound desc = could not find container \"e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3\": container with ID starting with 
e4f80975316b84f3c15a86c8b507396f1adf9d9a753a0786336a07ddc9b88ac3 not found: ID does not exist" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.268192 4812 scope.go:117] "RemoveContainer" containerID="030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33" Nov 25 16:51:10 crc kubenswrapper[4812]: E1125 16:51:10.268571 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33\": container with ID starting with 030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33 not found: ID does not exist" containerID="030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.268604 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33"} err="failed to get container status \"030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33\": rpc error: code = NotFound desc = could not find container \"030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33\": container with ID starting with 030ea420b928ec186e3f83ea0bcd019258f6cb3f21e04f59e9b8929ebd14de33 not found: ID does not exist" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.268631 4812 scope.go:117] "RemoveContainer" containerID="e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a" Nov 25 16:51:10 crc kubenswrapper[4812]: E1125 16:51:10.268894 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a\": container with ID starting with e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a not found: ID does not exist" containerID="e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.268920 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a"} err="failed to get container status \"e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a\": rpc error: code = NotFound desc = could not find container \"e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a\": container with ID starting with e295a6c68fab1b5a986c563cc49aff9e5b031db56d03e1f93694d7ea07e2c89a not found: ID does not exist" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.268936 4812 scope.go:117] "RemoveContainer" containerID="cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.281317 4812 scope.go:117] "RemoveContainer" containerID="d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.295163 4812 scope.go:117] "RemoveContainer" containerID="8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.309030 4812 scope.go:117] "RemoveContainer" containerID="cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d" Nov 25 16:51:10 crc kubenswrapper[4812]: E1125 16:51:10.309821 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d\": container 
with ID starting with cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d not found: ID does not exist" containerID="cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.309885 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d"} err="failed to get container status \"cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d\": rpc error: code = NotFound desc = could not find container \"cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d\": container with ID starting with cb7ef3f81b6f3fd79e09da2cd7c7a113616f301081f168cb9cc68d14cc03fe5d not found: ID does not exist" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.309921 4812 scope.go:117] "RemoveContainer" containerID="d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d" Nov 25 16:51:10 crc kubenswrapper[4812]: E1125 16:51:10.310242 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d\": container with ID starting with d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d not found: ID does not exist" containerID="d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.310290 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d"} err="failed to get container status \"d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d\": rpc error: code = NotFound desc = could not find container \"d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d\": container with ID starting with d290d1186419ea98735762cecb2fd2293ce6e361250fe89da33c4d680c9ae29d not found: ID does not exist" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.310305 4812 scope.go:117] "RemoveContainer" containerID="8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80" Nov 25 16:51:10 crc kubenswrapper[4812]: E1125 16:51:10.310690 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80\": container with ID starting with 8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80 not found: ID does not exist" containerID="8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80" Nov 25 16:51:10 crc kubenswrapper[4812]: I1125 16:51:10.310748 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80"} err="failed to get container status \"8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80\": rpc error: code = NotFound desc = could not find container \"8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80\": container with ID starting with 8d557da2401460f23c16fe25e3ce2c8e1717cab2ed49feb5b09823061b98cb80 not found: ID does not exist" Nov 25 16:51:11 crc kubenswrapper[4812]: I1125 16:51:11.836790 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56af28bf-996b-442f-9caa-87f498b292cf" path="/var/lib/kubelet/pods/56af28bf-996b-442f-9caa-87f498b292cf/volumes" Nov 25 16:51:11 crc kubenswrapper[4812]: 
I1125 16:51:11.837805 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" path="/var/lib/kubelet/pods/906a6b44-74ed-468a-8519-c269d04cf34b/volumes" Nov 25 16:51:11 crc kubenswrapper[4812]: I1125 16:51:11.848248 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zn9qf"] Nov 25 16:51:11 crc kubenswrapper[4812]: I1125 16:51:11.848466 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zn9qf" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="registry-server" containerID="cri-o://e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0" gracePeriod=2 Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.205964 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.209730 4812 generic.go:334] "Generic (PLEG): container finished" podID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerID="e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0" exitCode=0 Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.209764 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zn9qf" event={"ID":"20e948a0-16c2-48f7-a9ec-a70685feda00","Type":"ContainerDied","Data":"e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0"} Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.209787 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zn9qf" event={"ID":"20e948a0-16c2-48f7-a9ec-a70685feda00","Type":"ContainerDied","Data":"a669686fc0843e3c4a10544b8e649e115435964bcbcb5361da49c6d49000455c"} Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.209804 4812 scope.go:117] "RemoveContainer" containerID="e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.209830 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zn9qf" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.231093 4812 scope.go:117] "RemoveContainer" containerID="1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.254147 4812 scope.go:117] "RemoveContainer" containerID="ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.289783 4812 scope.go:117] "RemoveContainer" containerID="e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0" Nov 25 16:51:12 crc kubenswrapper[4812]: E1125 16:51:12.290162 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0\": container with ID starting with e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0 not found: ID does not exist" containerID="e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.290212 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0"} err="failed to get container status \"e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0\": rpc error: code = NotFound desc = could not find container \"e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0\": container with ID starting with e76be85f31a21d630fd87cfc884f85b358044e39332d303e66a22b7a4677b4c0 not found: ID does not exist" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.290233 4812 scope.go:117] "RemoveContainer" containerID="1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e" Nov 25 16:51:12 crc kubenswrapper[4812]: E1125 16:51:12.290639 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e\": container with ID starting with 1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e not found: ID does not exist" containerID="1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.290664 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e"} err="failed to get container status \"1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e\": rpc error: code = NotFound desc = could not find container \"1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e\": container with ID starting with 1dba29fa22d568104988189c181b46bdb32dc908b438b4c3507e4adc45f6f37e not found: ID does not exist" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.290680 4812 scope.go:117] "RemoveContainer" containerID="ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75" Nov 25 16:51:12 crc kubenswrapper[4812]: E1125 16:51:12.291025 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75\": container with ID starting with ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75 not found: ID does not exist" containerID="ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75" 
Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.291044 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75"} err="failed to get container status \"ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75\": rpc error: code = NotFound desc = could not find container \"ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75\": container with ID starting with ad6f6afbfa25c5d494194b9bbadfa310e05ceb7e6e7a556b64280d94aa5deb75 not found: ID does not exist" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.337689 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qr9s\" (UniqueName: \"kubernetes.io/projected/20e948a0-16c2-48f7-a9ec-a70685feda00-kube-api-access-2qr9s\") pod \"20e948a0-16c2-48f7-a9ec-a70685feda00\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.337751 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-utilities\") pod \"20e948a0-16c2-48f7-a9ec-a70685feda00\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.337800 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-catalog-content\") pod \"20e948a0-16c2-48f7-a9ec-a70685feda00\" (UID: \"20e948a0-16c2-48f7-a9ec-a70685feda00\") " Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.338516 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-utilities" (OuterVolumeSpecName: "utilities") pod "20e948a0-16c2-48f7-a9ec-a70685feda00" (UID: "20e948a0-16c2-48f7-a9ec-a70685feda00"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.342496 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20e948a0-16c2-48f7-a9ec-a70685feda00-kube-api-access-2qr9s" (OuterVolumeSpecName: "kube-api-access-2qr9s") pod "20e948a0-16c2-48f7-a9ec-a70685feda00" (UID: "20e948a0-16c2-48f7-a9ec-a70685feda00"). InnerVolumeSpecName "kube-api-access-2qr9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.354128 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20e948a0-16c2-48f7-a9ec-a70685feda00" (UID: "20e948a0-16c2-48f7-a9ec-a70685feda00"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.438874 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.438923 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qr9s\" (UniqueName: \"kubernetes.io/projected/20e948a0-16c2-48f7-a9ec-a70685feda00-kube-api-access-2qr9s\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.438949 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20e948a0-16c2-48f7-a9ec-a70685feda00-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.539883 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zn9qf"] Nov 25 16:51:12 crc kubenswrapper[4812]: I1125 16:51:12.543559 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zn9qf"] Nov 25 16:51:13 crc kubenswrapper[4812]: I1125 16:51:13.839066 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" path="/var/lib/kubelet/pods/20e948a0-16c2-48f7-a9ec-a70685feda00/volumes" Nov 25 16:51:18 crc kubenswrapper[4812]: I1125 16:51:18.427594 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:51:18 crc kubenswrapper[4812]: I1125 16:51:18.427868 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:51:18 crc kubenswrapper[4812]: I1125 16:51:18.471196 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:51:18 crc kubenswrapper[4812]: I1125 16:51:18.542158 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:51:18 crc kubenswrapper[4812]: I1125 16:51:18.542197 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:51:18 crc kubenswrapper[4812]: I1125 16:51:18.580912 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:51:19 crc kubenswrapper[4812]: I1125 16:51:19.283571 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:51:19 crc kubenswrapper[4812]: I1125 16:51:19.290257 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:51:19 crc kubenswrapper[4812]: I1125 16:51:19.854725 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-chhxl"] Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.256104 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-chhxl" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="registry-server" containerID="cri-o://d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf" gracePeriod=2 Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 
16:51:21.599787 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.749998 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-catalog-content\") pod \"85faff28-66f5-478c-a85a-5e6c26a50106\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.750401 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-utilities\") pod \"85faff28-66f5-478c-a85a-5e6c26a50106\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.750432 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd85m\" (UniqueName: \"kubernetes.io/projected/85faff28-66f5-478c-a85a-5e6c26a50106-kube-api-access-pd85m\") pod \"85faff28-66f5-478c-a85a-5e6c26a50106\" (UID: \"85faff28-66f5-478c-a85a-5e6c26a50106\") " Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.751125 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-utilities" (OuterVolumeSpecName: "utilities") pod "85faff28-66f5-478c-a85a-5e6c26a50106" (UID: "85faff28-66f5-478c-a85a-5e6c26a50106"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.756592 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85faff28-66f5-478c-a85a-5e6c26a50106-kube-api-access-pd85m" (OuterVolumeSpecName: "kube-api-access-pd85m") pod "85faff28-66f5-478c-a85a-5e6c26a50106" (UID: "85faff28-66f5-478c-a85a-5e6c26a50106"). InnerVolumeSpecName "kube-api-access-pd85m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.851115 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.851139 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd85m\" (UniqueName: \"kubernetes.io/projected/85faff28-66f5-478c-a85a-5e6c26a50106-kube-api-access-pd85m\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.863397 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85faff28-66f5-478c-a85a-5e6c26a50106" (UID: "85faff28-66f5-478c-a85a-5e6c26a50106"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:21 crc kubenswrapper[4812]: I1125 16:51:21.952851 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85faff28-66f5-478c-a85a-5e6c26a50106-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.266139 4812 generic.go:334] "Generic (PLEG): container finished" podID="85faff28-66f5-478c-a85a-5e6c26a50106" containerID="d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf" exitCode=0 Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.266193 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chhxl" event={"ID":"85faff28-66f5-478c-a85a-5e6c26a50106","Type":"ContainerDied","Data":"d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf"} Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.266232 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-chhxl" event={"ID":"85faff28-66f5-478c-a85a-5e6c26a50106","Type":"ContainerDied","Data":"a23b1cde1b3f34d7d93cf81b922fa051e4162404ca87f43d7246738ef571317f"} Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.266226 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-chhxl" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.266251 4812 scope.go:117] "RemoveContainer" containerID="d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.287335 4812 scope.go:117] "RemoveContainer" containerID="c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.299459 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-chhxl"] Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.303092 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-chhxl"] Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.315154 4812 scope.go:117] "RemoveContainer" containerID="e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.328981 4812 scope.go:117] "RemoveContainer" containerID="d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf" Nov 25 16:51:22 crc kubenswrapper[4812]: E1125 16:51:22.329366 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf\": container with ID starting with d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf not found: ID does not exist" containerID="d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.329398 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf"} err="failed to get container status \"d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf\": rpc error: code = NotFound desc = could not find container \"d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf\": container with ID starting with d53c1271d9e8d253d6b6879aa9feb0ccdd97f092f56a05b2c4361670151060cf not found: ID does not exist" Nov 25 16:51:22 crc 
kubenswrapper[4812]: I1125 16:51:22.329419 4812 scope.go:117] "RemoveContainer" containerID="c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9" Nov 25 16:51:22 crc kubenswrapper[4812]: E1125 16:51:22.329903 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9\": container with ID starting with c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9 not found: ID does not exist" containerID="c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.329954 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9"} err="failed to get container status \"c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9\": rpc error: code = NotFound desc = could not find container \"c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9\": container with ID starting with c38ddc47cf73282bc7993deb4c917398dca5e5a8cd73e14b0bfe4e6592e262e9 not found: ID does not exist" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.329989 4812 scope.go:117] "RemoveContainer" containerID="e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c" Nov 25 16:51:22 crc kubenswrapper[4812]: E1125 16:51:22.330564 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c\": container with ID starting with e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c not found: ID does not exist" containerID="e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c" Nov 25 16:51:22 crc kubenswrapper[4812]: I1125 16:51:22.330589 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c"} err="failed to get container status \"e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c\": rpc error: code = NotFound desc = could not find container \"e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c\": container with ID starting with e1c6488f3d380b5840e80ce37cf5ba08f6aa93a719b7830bddf00c5aad8d099c not found: ID does not exist" Nov 25 16:51:23 crc kubenswrapper[4812]: I1125 16:51:23.837655 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" path="/var/lib/kubelet/pods/85faff28-66f5-478c-a85a-5e6c26a50106/volumes" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.511086 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" podUID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" containerName="oauth-openshift" containerID="cri-o://cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68" gracePeriod=15 Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.897867 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945581 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-login\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945643 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bmmn\" (UniqueName: \"kubernetes.io/projected/8bad98aa-94be-4024-8cb5-dc6078ffec1f-kube-api-access-4bmmn\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945683 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-serving-cert\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945710 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-provider-selection\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945745 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-trusted-ca-bundle\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945775 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-policies\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945807 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-service-ca\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945827 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-cliconfig\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945847 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-dir\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 
16:51:29.945872 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-router-certs\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945898 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-idp-0-file-data\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945923 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-session\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945948 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-ocp-branding-template\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.945976 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-error\") pod \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\" (UID: \"8bad98aa-94be-4024-8cb5-dc6078ffec1f\") " Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.947390 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.947445 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.947371 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.947270 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.947720 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.953049 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.953173 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bad98aa-94be-4024-8cb5-dc6078ffec1f-kube-api-access-4bmmn" (OuterVolumeSpecName: "kube-api-access-4bmmn") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "kube-api-access-4bmmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.953411 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.954661 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.955114 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.955333 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.957144 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.957187 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:29 crc kubenswrapper[4812]: I1125 16:51:29.957339 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "8bad98aa-94be-4024-8cb5-dc6078ffec1f" (UID: "8bad98aa-94be-4024-8cb5-dc6078ffec1f"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047387 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bmmn\" (UniqueName: \"kubernetes.io/projected/8bad98aa-94be-4024-8cb5-dc6078ffec1f-kube-api-access-4bmmn\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047445 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047463 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047478 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047492 4812 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047503 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047513 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047526 4812 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bad98aa-94be-4024-8cb5-dc6078ffec1f-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047557 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047570 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047583 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047594 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047605 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.047617 4812 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8bad98aa-94be-4024-8cb5-dc6078ffec1f-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.315183 4812 generic.go:334] "Generic (PLEG): container finished" podID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" containerID="cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68" exitCode=0 Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.315240 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" event={"ID":"8bad98aa-94be-4024-8cb5-dc6078ffec1f","Type":"ContainerDied","Data":"cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68"} Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.315376 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" event={"ID":"8bad98aa-94be-4024-8cb5-dc6078ffec1f","Type":"ContainerDied","Data":"2e5b182a27686b156843e05e272fbad75f7befd37c685ae9883213468f05a11d"} Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.315406 4812 scope.go:117] "RemoveContainer" containerID="cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.315295 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-fjv5r" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.335966 4812 scope.go:117] "RemoveContainer" containerID="cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68" Nov 25 16:51:30 crc kubenswrapper[4812]: E1125 16:51:30.336665 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68\": container with ID starting with cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68 not found: ID does not exist" containerID="cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.336738 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68"} err="failed to get container status \"cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68\": rpc error: code = NotFound desc = could not find container \"cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68\": container with ID starting with cb5999fe4e3d3282f1c1e6ae6a20006d31ace8a64e83fc4b048795d174e0ad68 not found: ID does not exist" Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.343357 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fjv5r"] Nov 25 16:51:30 crc kubenswrapper[4812]: I1125 16:51:30.349552 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-fjv5r"] Nov 25 16:51:31 crc kubenswrapper[4812]: I1125 16:51:31.838109 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" path="/var/lib/kubelet/pods/8bad98aa-94be-4024-8cb5-dc6078ffec1f/volumes" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.001906 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t"] Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002391 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002403 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002415 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002420 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002431 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002437 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002446 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="registry-server" Nov 25 16:51:34 crc 
kubenswrapper[4812]: I1125 16:51:34.002451 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002460 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002465 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002473 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002479 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002486 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002491 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002501 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002507 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002515 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8765a2-e7c2-4810-b7a8-dcca6732d618" containerName="pruner" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002520 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8765a2-e7c2-4810-b7a8-dcca6732d618" containerName="pruner" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002545 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002552 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002561 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" containerName="oauth-openshift" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002566 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" containerName="oauth-openshift" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002574 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002579 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002588 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c3946cb-ff76-48e1-a548-143540c9772c" containerName="pruner" Nov 25 16:51:34 
crc kubenswrapper[4812]: I1125 16:51:34.002594 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c3946cb-ff76-48e1-a548-143540c9772c" containerName="pruner" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002600 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002606 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="extract-utilities" Nov 25 16:51:34 crc kubenswrapper[4812]: E1125 16:51:34.002612 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002617 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="extract-content" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002698 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="85faff28-66f5-478c-a85a-5e6c26a50106" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002706 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bad98aa-94be-4024-8cb5-dc6078ffec1f" containerName="oauth-openshift" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002718 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c3946cb-ff76-48e1-a548-143540c9772c" containerName="pruner" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002726 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b8765a2-e7c2-4810-b7a8-dcca6732d618" containerName="pruner" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002733 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="56af28bf-996b-442f-9caa-87f498b292cf" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002740 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="906a6b44-74ed-468a-8519-c269d04cf34b" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.002747 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="20e948a0-16c2-48f7-a9ec-a70685feda00" containerName="registry-server" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.003090 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.005002 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.005087 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.005869 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.007582 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.007612 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.007723 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.007734 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.007780 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.008286 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.008431 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.008712 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.010064 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.014616 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.016954 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.020480 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.024981 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t"] Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095093 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6987edf0-2653-4a17-95db-3848f48b5c14-audit-dir\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 
16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095151 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-audit-policies\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095181 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095219 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-session\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095252 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-login\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095280 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8cmc\" (UniqueName: \"kubernetes.io/projected/6987edf0-2653-4a17-95db-3848f48b5c14-kube-api-access-v8cmc\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095307 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-error\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095337 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095363 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-router-certs\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: 
\"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095404 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095430 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095472 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-service-ca\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095497 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.095523 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196420 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-session\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196486 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-login\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196516 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8cmc\" (UniqueName: 
\"kubernetes.io/projected/6987edf0-2653-4a17-95db-3848f48b5c14-kube-api-access-v8cmc\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196567 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-error\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196594 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196622 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-router-certs\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196659 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196683 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196717 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-service-ca\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196737 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196764 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196793 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6987edf0-2653-4a17-95db-3848f48b5c14-audit-dir\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196821 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-audit-policies\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.196843 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.198515 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.198598 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.198663 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6987edf0-2653-4a17-95db-3848f48b5c14-audit-dir\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.199571 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-audit-policies\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.200073 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-service-ca\") pod 
\"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.203343 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-error\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.203726 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-session\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.204444 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.205555 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-router-certs\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.205970 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-template-login\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.207674 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.209998 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.210336 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6987edf0-2653-4a17-95db-3848f48b5c14-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" 
(UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.223892 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8cmc\" (UniqueName: \"kubernetes.io/projected/6987edf0-2653-4a17-95db-3848f48b5c14-kube-api-access-v8cmc\") pod \"oauth-openshift-69bcbbd7f8-rxn8t\" (UID: \"6987edf0-2653-4a17-95db-3848f48b5c14\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.321087 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:34 crc kubenswrapper[4812]: I1125 16:51:34.741444 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t"] Nov 25 16:51:35 crc kubenswrapper[4812]: I1125 16:51:35.342522 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" event={"ID":"6987edf0-2653-4a17-95db-3848f48b5c14","Type":"ContainerStarted","Data":"7c77eee275c2249c1251cef9895728ebe1b1cb454ceca16a5eee3170134d4b18"} Nov 25 16:51:35 crc kubenswrapper[4812]: I1125 16:51:35.342910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" event={"ID":"6987edf0-2653-4a17-95db-3848f48b5c14","Type":"ContainerStarted","Data":"df9a54f61aeddec446cbb920c44ddb79559d250b433bf460dfb86c3d1ca29e26"} Nov 25 16:51:35 crc kubenswrapper[4812]: I1125 16:51:35.342929 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:35 crc kubenswrapper[4812]: I1125 16:51:35.371673 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" podStartSLOduration=31.371658143 podStartE2EDuration="31.371658143s" podCreationTimestamp="2025-11-25 16:51:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:51:35.365270996 +0000 UTC m=+270.205413111" watchObservedRunningTime="2025-11-25 16:51:35.371658143 +0000 UTC m=+270.211800238" Nov 25 16:51:35 crc kubenswrapper[4812]: I1125 16:51:35.474554 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-rxn8t" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.767470 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-whrrz"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.768518 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-whrrz" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="registry-server" containerID="cri-o://b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19" gracePeriod=30 Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.773064 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-49nlv"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.773279 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-49nlv" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" 
containerName="registry-server" containerID="cri-o://49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675" gracePeriod=30 Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.786775 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v7lp8"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.786996 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" containerID="cri-o://e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38" gracePeriod=30 Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.799811 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jwjxx"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.800132 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jwjxx" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="registry-server" containerID="cri-o://5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5" gracePeriod=30 Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.809306 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pfrp6"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.809670 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pfrp6" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="registry-server" containerID="cri-o://af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291" gracePeriod=30 Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.816428 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wsrf4"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.817233 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.838808 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wsrf4"] Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.847638 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/59893999-475f-45a2-9969-648b6133d46d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.847735 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/59893999-475f-45a2-9969-648b6133d46d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.847781 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zr8r\" (UniqueName: \"kubernetes.io/projected/59893999-475f-45a2-9969-648b6133d46d-kube-api-access-4zr8r\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.948554 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/59893999-475f-45a2-9969-648b6133d46d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.948999 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zr8r\" (UniqueName: \"kubernetes.io/projected/59893999-475f-45a2-9969-648b6133d46d-kube-api-access-4zr8r\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.949067 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/59893999-475f-45a2-9969-648b6133d46d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.951079 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/59893999-475f-45a2-9969-648b6133d46d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.959104 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/59893999-475f-45a2-9969-648b6133d46d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:53 crc kubenswrapper[4812]: I1125 16:51:53.972596 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zr8r\" (UniqueName: \"kubernetes.io/projected/59893999-475f-45a2-9969-648b6133d46d-kube-api-access-4zr8r\") pod \"marketplace-operator-79b997595-wsrf4\" (UID: \"59893999-475f-45a2-9969-648b6133d46d\") " pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.170421 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.176811 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.251088 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.279521 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.280151 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.287791 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.357487 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-utilities\") pod \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.357572 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ms8g\" (UniqueName: \"kubernetes.io/projected/17f59f39-a958-4cb4-8a6a-679e7f08a13b-kube-api-access-9ms8g\") pod \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.357810 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-catalog-content\") pod \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\" (UID: \"17f59f39-a958-4cb4-8a6a-679e7f08a13b\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.357858 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-catalog-content\") pod \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.357905 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-utilities\") pod \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.357977 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrxgh\" (UniqueName: \"kubernetes.io/projected/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-kube-api-access-mrxgh\") pod \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\" (UID: \"f2cd8a2b-d216-49b7-b86c-fa6b743f238d\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.363007 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-utilities" (OuterVolumeSpecName: "utilities") pod "f2cd8a2b-d216-49b7-b86c-fa6b743f238d" (UID: "f2cd8a2b-d216-49b7-b86c-fa6b743f238d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.363383 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-utilities" (OuterVolumeSpecName: "utilities") pod "17f59f39-a958-4cb4-8a6a-679e7f08a13b" (UID: "17f59f39-a958-4cb4-8a6a-679e7f08a13b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.366323 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-kube-api-access-mrxgh" (OuterVolumeSpecName: "kube-api-access-mrxgh") pod "f2cd8a2b-d216-49b7-b86c-fa6b743f238d" (UID: "f2cd8a2b-d216-49b7-b86c-fa6b743f238d"). InnerVolumeSpecName "kube-api-access-mrxgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.366524 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17f59f39-a958-4cb4-8a6a-679e7f08a13b-kube-api-access-9ms8g" (OuterVolumeSpecName: "kube-api-access-9ms8g") pod "17f59f39-a958-4cb4-8a6a-679e7f08a13b" (UID: "17f59f39-a958-4cb4-8a6a-679e7f08a13b"). InnerVolumeSpecName "kube-api-access-9ms8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.408273 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "17f59f39-a958-4cb4-8a6a-679e7f08a13b" (UID: "17f59f39-a958-4cb4-8a6a-679e7f08a13b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.448642 4812 generic.go:334] "Generic (PLEG): container finished" podID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerID="b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19" exitCode=0 Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.448708 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-whrrz" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.448741 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-whrrz" event={"ID":"17f59f39-a958-4cb4-8a6a-679e7f08a13b","Type":"ContainerDied","Data":"b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.448870 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-whrrz" event={"ID":"17f59f39-a958-4cb4-8a6a-679e7f08a13b","Type":"ContainerDied","Data":"d6c55dcc7da5e155552e9706b789addefa5aad14264b0e66046c202c15d8bdc9"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.448905 4812 scope.go:117] "RemoveContainer" containerID="b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.451591 4812 generic.go:334] "Generic (PLEG): container finished" podID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerID="49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675" exitCode=0 Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.451685 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerDied","Data":"49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.452025 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49nlv" event={"ID":"c1f1e583-20dd-4501-bb66-a4ee8239367a","Type":"ContainerDied","Data":"b060737aaaed9ca88cbd9871a4172a7439a7b3daf621eddf0114d45a05d0089b"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.451743 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-49nlv" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.455799 4812 generic.go:334] "Generic (PLEG): container finished" podID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerID="af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291" exitCode=0 Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.455891 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerDied","Data":"af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.455962 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pfrp6" event={"ID":"f2cd8a2b-d216-49b7-b86c-fa6b743f238d","Type":"ContainerDied","Data":"1c12790b30530d7d3d9766abbe361c31f9ca399242b36b593b084abc534a0357"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.456107 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pfrp6" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.459239 4812 generic.go:334] "Generic (PLEG): container finished" podID="be7271bc-142b-4cff-aabd-8a69d6373849" containerID="e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38" exitCode=0 Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.459345 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" event={"ID":"be7271bc-142b-4cff-aabd-8a69d6373849","Type":"ContainerDied","Data":"e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.459414 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.459376 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-v7lp8" event={"ID":"be7271bc-142b-4cff-aabd-8a69d6373849","Type":"ContainerDied","Data":"77e164fbc764d8f526d52d0f2af64514bc7e9b945354f3afef6d275721f18726"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.459462 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6fqs\" (UniqueName: \"kubernetes.io/projected/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-kube-api-access-m6fqs\") pod \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.460310 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-catalog-content\") pod \"c1f1e583-20dd-4501-bb66-a4ee8239367a\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.460490 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-utilities\") pod \"c1f1e583-20dd-4501-bb66-a4ee8239367a\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.460612 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-trusted-ca\") pod \"be7271bc-142b-4cff-aabd-8a69d6373849\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.460687 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-catalog-content\") pod \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.460903 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4js7\" (UniqueName: \"kubernetes.io/projected/be7271bc-142b-4cff-aabd-8a69d6373849-kube-api-access-c4js7\") pod \"be7271bc-142b-4cff-aabd-8a69d6373849\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.461116 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-operator-metrics\") pod \"be7271bc-142b-4cff-aabd-8a69d6373849\" (UID: \"be7271bc-142b-4cff-aabd-8a69d6373849\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.461342 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-utilities\") pod \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\" (UID: \"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.461553 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6jjg\" (UniqueName: 
\"kubernetes.io/projected/c1f1e583-20dd-4501-bb66-a4ee8239367a-kube-api-access-x6jjg\") pod \"c1f1e583-20dd-4501-bb66-a4ee8239367a\" (UID: \"c1f1e583-20dd-4501-bb66-a4ee8239367a\") " Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.461906 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ms8g\" (UniqueName: \"kubernetes.io/projected/17f59f39-a958-4cb4-8a6a-679e7f08a13b-kube-api-access-9ms8g\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.461985 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.462056 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.462113 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrxgh\" (UniqueName: \"kubernetes.io/projected/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-kube-api-access-mrxgh\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.462178 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17f59f39-a958-4cb4-8a6a-679e7f08a13b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.463564 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-kube-api-access-m6fqs" (OuterVolumeSpecName: "kube-api-access-m6fqs") pod "df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" (UID: "df156ba1-d2fb-45ea-bffe-4b2bdeb18d72"). InnerVolumeSpecName "kube-api-access-m6fqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.464991 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-utilities" (OuterVolumeSpecName: "utilities") pod "c1f1e583-20dd-4501-bb66-a4ee8239367a" (UID: "c1f1e583-20dd-4501-bb66-a4ee8239367a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.465136 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1f1e583-20dd-4501-bb66-a4ee8239367a-kube-api-access-x6jjg" (OuterVolumeSpecName: "kube-api-access-x6jjg") pod "c1f1e583-20dd-4501-bb66-a4ee8239367a" (UID: "c1f1e583-20dd-4501-bb66-a4ee8239367a"). InnerVolumeSpecName "kube-api-access-x6jjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.465200 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-utilities" (OuterVolumeSpecName: "utilities") pod "df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" (UID: "df156ba1-d2fb-45ea-bffe-4b2bdeb18d72"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.465688 4812 generic.go:334] "Generic (PLEG): container finished" podID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerID="5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5" exitCode=0 Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.465741 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jwjxx" event={"ID":"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72","Type":"ContainerDied","Data":"5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.465815 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jwjxx" event={"ID":"df156ba1-d2fb-45ea-bffe-4b2bdeb18d72","Type":"ContainerDied","Data":"4ac84f42ad6ea108025a4a3a8cddcdf94c9261f81b485df74e4dfbb1df961b1b"} Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.465969 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jwjxx" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.468799 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "be7271bc-142b-4cff-aabd-8a69d6373849" (UID: "be7271bc-142b-4cff-aabd-8a69d6373849"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.471034 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f2cd8a2b-d216-49b7-b86c-fa6b743f238d" (UID: "f2cd8a2b-d216-49b7-b86c-fa6b743f238d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.471633 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be7271bc-142b-4cff-aabd-8a69d6373849-kube-api-access-c4js7" (OuterVolumeSpecName: "kube-api-access-c4js7") pod "be7271bc-142b-4cff-aabd-8a69d6373849" (UID: "be7271bc-142b-4cff-aabd-8a69d6373849"). InnerVolumeSpecName "kube-api-access-c4js7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.471921 4812 scope.go:117] "RemoveContainer" containerID="9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.476932 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "be7271bc-142b-4cff-aabd-8a69d6373849" (UID: "be7271bc-142b-4cff-aabd-8a69d6373849"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.493775 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" (UID: "df156ba1-d2fb-45ea-bffe-4b2bdeb18d72"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.495103 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-whrrz"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.495665 4812 scope.go:117] "RemoveContainer" containerID="1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.499367 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-whrrz"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.512850 4812 scope.go:117] "RemoveContainer" containerID="b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.513355 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19\": container with ID starting with b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19 not found: ID does not exist" containerID="b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.513399 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19"} err="failed to get container status \"b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19\": rpc error: code = NotFound desc = could not find container \"b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19\": container with ID starting with b4f6b2a396991e14acfc471e6cfbcbcf91dda6fffe8004359f12e45b50e2af19 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.513431 4812 scope.go:117] "RemoveContainer" containerID="9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.514126 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478\": container with ID starting with 9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478 not found: ID does not exist" containerID="9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.514153 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478"} err="failed to get container status \"9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478\": rpc error: code = NotFound desc = could not find container \"9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478\": container with ID starting with 9fab543b32520406fed535c69d7779732a9d16cae46c402faaa0178240d8b478 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.514167 4812 scope.go:117] "RemoveContainer" containerID="1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.514623 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad\": container with ID 
starting with 1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad not found: ID does not exist" containerID="1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.514646 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad"} err="failed to get container status \"1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad\": rpc error: code = NotFound desc = could not find container \"1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad\": container with ID starting with 1de63071db6944870c4b377267fa03c8c2456afdea90ee3f56b1f01ba6793bad not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.514661 4812 scope.go:117] "RemoveContainer" containerID="49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.528460 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1f1e583-20dd-4501-bb66-a4ee8239367a" (UID: "c1f1e583-20dd-4501-bb66-a4ee8239367a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.529297 4812 scope.go:117] "RemoveContainer" containerID="41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.544761 4812 scope.go:117] "RemoveContainer" containerID="462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562906 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4js7\" (UniqueName: \"kubernetes.io/projected/be7271bc-142b-4cff-aabd-8a69d6373849-kube-api-access-c4js7\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562939 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2cd8a2b-d216-49b7-b86c-fa6b743f238d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562952 4812 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562961 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562970 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6jjg\" (UniqueName: \"kubernetes.io/projected/c1f1e583-20dd-4501-bb66-a4ee8239367a-kube-api-access-x6jjg\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562980 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6fqs\" (UniqueName: \"kubernetes.io/projected/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-kube-api-access-m6fqs\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.562990 4812 reconciler_common.go:293] "Volume detached for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.563000 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1f1e583-20dd-4501-bb66-a4ee8239367a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.563009 4812 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be7271bc-142b-4cff-aabd-8a69d6373849-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.563017 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.566033 4812 scope.go:117] "RemoveContainer" containerID="49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.567324 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675\": container with ID starting with 49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675 not found: ID does not exist" containerID="49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.567390 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675"} err="failed to get container status \"49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675\": rpc error: code = NotFound desc = could not find container \"49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675\": container with ID starting with 49ba22f4a117f9f3b1485d8b4d4a8581d15157c6def05b2140c1724e89c4c675 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.567702 4812 scope.go:117] "RemoveContainer" containerID="41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.568228 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0\": container with ID starting with 41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0 not found: ID does not exist" containerID="41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.568264 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0"} err="failed to get container status \"41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0\": rpc error: code = NotFound desc = could not find container \"41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0\": container with ID starting with 41f8b9cc5aab1ff59e6855172fab87efde12b1a2e072c4b0f7d4b641fb272fb0 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.568291 4812 scope.go:117] "RemoveContainer" 
containerID="462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.568878 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c\": container with ID starting with 462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c not found: ID does not exist" containerID="462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.568919 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c"} err="failed to get container status \"462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c\": rpc error: code = NotFound desc = could not find container \"462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c\": container with ID starting with 462b7370dd7a71b8fb3c6bdde42b025cc74d36791d39b2a2ef9856674ecfba2c not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.568944 4812 scope.go:117] "RemoveContainer" containerID="af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.587483 4812 scope.go:117] "RemoveContainer" containerID="eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.594670 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wsrf4"] Nov 25 16:51:54 crc kubenswrapper[4812]: W1125 16:51:54.598072 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59893999_475f_45a2_9969_648b6133d46d.slice/crio-801ffed36d9844782088f70d4a43cecea3793e40912dbf0fdeae4ad269156366 WatchSource:0}: Error finding container 801ffed36d9844782088f70d4a43cecea3793e40912dbf0fdeae4ad269156366: Status 404 returned error can't find the container with id 801ffed36d9844782088f70d4a43cecea3793e40912dbf0fdeae4ad269156366 Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.608560 4812 scope.go:117] "RemoveContainer" containerID="09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.636340 4812 scope.go:117] "RemoveContainer" containerID="af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.636978 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291\": container with ID starting with af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291 not found: ID does not exist" containerID="af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.637041 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291"} err="failed to get container status \"af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291\": rpc error: code = NotFound desc = could not find container \"af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291\": container with ID starting with 
af0e44f49772d94b0a6010454b70e955e73f1bc92febad03fe7cb6139ea4f291 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.637083 4812 scope.go:117] "RemoveContainer" containerID="eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.637596 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a\": container with ID starting with eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a not found: ID does not exist" containerID="eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.637645 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a"} err="failed to get container status \"eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a\": rpc error: code = NotFound desc = could not find container \"eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a\": container with ID starting with eb196078a050e20df99537a02a1f1abfc1c84e12339e18108ebee9822041758a not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.637678 4812 scope.go:117] "RemoveContainer" containerID="09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.638047 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a\": container with ID starting with 09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a not found: ID does not exist" containerID="09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.638078 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a"} err="failed to get container status \"09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a\": rpc error: code = NotFound desc = could not find container \"09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a\": container with ID starting with 09584a08858e53215fa916af4d21ac88819a1741aa3132cbd9e60b1b4d97752a not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.638094 4812 scope.go:117] "RemoveContainer" containerID="e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.652692 4812 scope.go:117] "RemoveContainer" containerID="e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.653128 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38\": container with ID starting with e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38 not found: ID does not exist" containerID="e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.653164 4812 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38"} err="failed to get container status \"e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38\": rpc error: code = NotFound desc = could not find container \"e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38\": container with ID starting with e56b3180da629bf5ccbf92db9a499ee16b21ac5154d01e8ea45231c00fed9d38 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.653194 4812 scope.go:117] "RemoveContainer" containerID="5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.677915 4812 scope.go:117] "RemoveContainer" containerID="5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.701690 4812 scope.go:117] "RemoveContainer" containerID="ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.715641 4812 scope.go:117] "RemoveContainer" containerID="5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.716167 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5\": container with ID starting with 5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5 not found: ID does not exist" containerID="5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.716221 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5"} err="failed to get container status \"5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5\": rpc error: code = NotFound desc = could not find container \"5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5\": container with ID starting with 5b9dafe90b710850847699dab05556773af8ad5bb9f392dcec9aeb8a115d84f5 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.716262 4812 scope.go:117] "RemoveContainer" containerID="5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.716776 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489\": container with ID starting with 5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489 not found: ID does not exist" containerID="5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.716830 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489"} err="failed to get container status \"5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489\": rpc error: code = NotFound desc = could not find container \"5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489\": container with ID starting with 5fba6613f2723992b00834bb8c2fe3b13b1f04664088261802779c5229258489 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.716851 4812 
scope.go:117] "RemoveContainer" containerID="ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777" Nov 25 16:51:54 crc kubenswrapper[4812]: E1125 16:51:54.717137 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777\": container with ID starting with ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777 not found: ID does not exist" containerID="ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.717162 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777"} err="failed to get container status \"ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777\": rpc error: code = NotFound desc = could not find container \"ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777\": container with ID starting with ea387f5037f4cbbcc6e64cbac29bb43c0f842f6a45a6dfc79322f4e6c0bb1777 not found: ID does not exist" Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.792715 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-49nlv"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.795851 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-49nlv"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.806788 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v7lp8"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.823003 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-v7lp8"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.827111 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jwjxx"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.829876 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jwjxx"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.834139 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pfrp6"] Nov 25 16:51:54 crc kubenswrapper[4812]: I1125 16:51:54.836510 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pfrp6"] Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.476561 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" event={"ID":"59893999-475f-45a2-9969-648b6133d46d","Type":"ContainerStarted","Data":"3eacb4ac230b76fc16cd5d4f75de846eb99c29f0be57786ef14e04dc085e1d0d"} Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.476620 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" event={"ID":"59893999-475f-45a2-9969-648b6133d46d","Type":"ContainerStarted","Data":"801ffed36d9844782088f70d4a43cecea3793e40912dbf0fdeae4ad269156366"} Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.476644 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.480724 4812 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.493411 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wsrf4" podStartSLOduration=2.493392346 podStartE2EDuration="2.493392346s" podCreationTimestamp="2025-11-25 16:51:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:51:55.491607674 +0000 UTC m=+290.331749769" watchObservedRunningTime="2025-11-25 16:51:55.493392346 +0000 UTC m=+290.333534441" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.843965 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" path="/var/lib/kubelet/pods/17f59f39-a958-4cb4-8a6a-679e7f08a13b/volumes" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.845240 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" path="/var/lib/kubelet/pods/be7271bc-142b-4cff-aabd-8a69d6373849/volumes" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.845688 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" path="/var/lib/kubelet/pods/c1f1e583-20dd-4501-bb66-a4ee8239367a/volumes" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.846657 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" path="/var/lib/kubelet/pods/df156ba1-d2fb-45ea-bffe-4b2bdeb18d72/volumes" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.847192 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" path="/var/lib/kubelet/pods/f2cd8a2b-d216-49b7-b86c-fa6b743f238d/volumes" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992381 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s69c5"] Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992672 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992689 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992701 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992709 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992728 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992737 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992747 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="extract-utilities" Nov 25 16:51:55 crc 
kubenswrapper[4812]: I1125 16:51:55.992757 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992772 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992780 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992792 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992799 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992810 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992818 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992831 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992839 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992854 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992862 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992873 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992880 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="extract-content" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992892 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992899 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992908 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992915 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="extract-utilities" Nov 25 16:51:55 crc kubenswrapper[4812]: E1125 16:51:55.992923 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" 
containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.992931 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.993052 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="df156ba1-d2fb-45ea-bffe-4b2bdeb18d72" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.993067 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f59f39-a958-4cb4-8a6a-679e7f08a13b" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.993077 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1f1e583-20dd-4501-bb66-a4ee8239367a" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.993087 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2cd8a2b-d216-49b7-b86c-fa6b743f238d" containerName="registry-server" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.993096 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="be7271bc-142b-4cff-aabd-8a69d6373849" containerName="marketplace-operator" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.993909 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:55 crc kubenswrapper[4812]: I1125 16:51:55.996609 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.005190 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s69c5"] Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.084267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5be2bd70-a890-4af3-a0fc-5a87851211f9-catalog-content\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.084386 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5sjf\" (UniqueName: \"kubernetes.io/projected/5be2bd70-a890-4af3-a0fc-5a87851211f9-kube-api-access-m5sjf\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.084428 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5be2bd70-a890-4af3-a0fc-5a87851211f9-utilities\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.185499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5be2bd70-a890-4af3-a0fc-5a87851211f9-utilities\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.185558 4812 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5be2bd70-a890-4af3-a0fc-5a87851211f9-catalog-content\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.185624 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5sjf\" (UniqueName: \"kubernetes.io/projected/5be2bd70-a890-4af3-a0fc-5a87851211f9-kube-api-access-m5sjf\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.187327 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5be2bd70-a890-4af3-a0fc-5a87851211f9-catalog-content\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.187348 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5be2bd70-a890-4af3-a0fc-5a87851211f9-utilities\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.199989 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cgdkf"] Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.201925 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.208646 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5sjf\" (UniqueName: \"kubernetes.io/projected/5be2bd70-a890-4af3-a0fc-5a87851211f9-kube-api-access-m5sjf\") pod \"redhat-marketplace-s69c5\" (UID: \"5be2bd70-a890-4af3-a0fc-5a87851211f9\") " pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.208818 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cgdkf"] Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.209661 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.286613 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d893abf0-5f20-4874-ad11-48bd8a156929-catalog-content\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.286673 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/d893abf0-5f20-4874-ad11-48bd8a156929-kube-api-access-qggbh\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.286735 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d893abf0-5f20-4874-ad11-48bd8a156929-utilities\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.319493 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.395329 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d893abf0-5f20-4874-ad11-48bd8a156929-utilities\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.395623 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d893abf0-5f20-4874-ad11-48bd8a156929-catalog-content\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.395665 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/d893abf0-5f20-4874-ad11-48bd8a156929-kube-api-access-qggbh\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.395928 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d893abf0-5f20-4874-ad11-48bd8a156929-utilities\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.396206 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d893abf0-5f20-4874-ad11-48bd8a156929-catalog-content\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.424169 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/d893abf0-5f20-4874-ad11-48bd8a156929-kube-api-access-qggbh\") pod \"community-operators-cgdkf\" (UID: \"d893abf0-5f20-4874-ad11-48bd8a156929\") " pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.511146 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s69c5"] Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.540120 4812 util.go:30] "No sandbox for pod can be found. 
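Each marketplace catalog pod above wires up the same three volumes before its sandbox starts: two emptyDirs (utilities, catalog-content) and a projected service-account token volume (kube-api-access-*). A minimal Go sketch of that layout with k8s.io/api/core/v1 follows; the volume names are taken from the log, while the rest of the pod spec is omitted and the kube-api-access-* volume is normally injected by the API server rather than declared by hand.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// catalogVolumes mirrors the volume set the mount events above imply
// for redhat-marketplace-s69c5; illustrative only.
func catalogVolumes() []corev1.Volume {
	return []corev1.Volume{
		{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		// Normally generated and injected by the API server; listed only to mirror the log.
		{Name: "kube-api-access-m5sjf", VolumeSource: corev1.VolumeSource{Projected: &corev1.ProjectedVolumeSource{}}},
	}
}

func main() {
	for _, v := range catalogVolumes() {
		fmt.Println(v.Name)
	}
}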
Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.540120 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cgdkf"
Nov 25 16:51:56 crc kubenswrapper[4812]: I1125 16:51:56.717306 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cgdkf"]
Nov 25 16:51:56 crc kubenswrapper[4812]: W1125 16:51:56.750187 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd893abf0_5f20_4874_ad11_48bd8a156929.slice/crio-9855f5080d1df900d076306866c5875dcd1b0ca44db4357d89de09bfae092ee5 WatchSource:0}: Error finding container 9855f5080d1df900d076306866c5875dcd1b0ca44db4357d89de09bfae092ee5: Status 404 returned error can't find the container with id 9855f5080d1df900d076306866c5875dcd1b0ca44db4357d89de09bfae092ee5
Nov 25 16:51:57 crc kubenswrapper[4812]: I1125 16:51:57.492556 4812 generic.go:334] "Generic (PLEG): container finished" podID="d893abf0-5f20-4874-ad11-48bd8a156929" containerID="d660da82bb9987f59926a3f6d40cc04ac635479e0905443b0fd2d85a67c1ef40" exitCode=0
Nov 25 16:51:57 crc kubenswrapper[4812]: I1125 16:51:57.492666 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cgdkf" event={"ID":"d893abf0-5f20-4874-ad11-48bd8a156929","Type":"ContainerDied","Data":"d660da82bb9987f59926a3f6d40cc04ac635479e0905443b0fd2d85a67c1ef40"}
Nov 25 16:51:57 crc kubenswrapper[4812]: I1125 16:51:57.492717 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cgdkf" event={"ID":"d893abf0-5f20-4874-ad11-48bd8a156929","Type":"ContainerStarted","Data":"9855f5080d1df900d076306866c5875dcd1b0ca44db4357d89de09bfae092ee5"}
Nov 25 16:51:57 crc kubenswrapper[4812]: I1125 16:51:57.496612 4812 generic.go:334] "Generic (PLEG): container finished" podID="5be2bd70-a890-4af3-a0fc-5a87851211f9" containerID="fcee1576dfd93d07ee68addcc3cdc92804cf49415d3c65f565da2ab4cbce70cf" exitCode=0
Nov 25 16:51:57 crc kubenswrapper[4812]: I1125 16:51:57.496696 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s69c5" event={"ID":"5be2bd70-a890-4af3-a0fc-5a87851211f9","Type":"ContainerDied","Data":"fcee1576dfd93d07ee68addcc3cdc92804cf49415d3c65f565da2ab4cbce70cf"}
Nov 25 16:51:57 crc kubenswrapper[4812]: I1125 16:51:57.496734 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s69c5" event={"ID":"5be2bd70-a890-4af3-a0fc-5a87851211f9","Type":"ContainerStarted","Data":"433497458c8c4091a707d38a1a275ac310677bcb0902df6f840086d10f612980"}
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.390099 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lhng2"]
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.391629 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.394311 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.402163 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lhng2"]
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.424419 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgnhq\" (UniqueName: \"kubernetes.io/projected/d6e4606b-d577-4d2d-abfe-b001356839b6-kube-api-access-xgnhq\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.424482 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6e4606b-d577-4d2d-abfe-b001356839b6-catalog-content\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.425170 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6e4606b-d577-4d2d-abfe-b001356839b6-utilities\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.505659 4812 generic.go:334] "Generic (PLEG): container finished" podID="5be2bd70-a890-4af3-a0fc-5a87851211f9" containerID="f98381540392b2c4cc13c8b1c10df7ac8baa2665e58111be54b829b29878d7d8" exitCode=0
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.505744 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s69c5" event={"ID":"5be2bd70-a890-4af3-a0fc-5a87851211f9","Type":"ContainerDied","Data":"f98381540392b2c4cc13c8b1c10df7ac8baa2665e58111be54b829b29878d7d8"}
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.511766 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cgdkf" event={"ID":"d893abf0-5f20-4874-ad11-48bd8a156929","Type":"ContainerStarted","Data":"75bb2efbbcd0369938defe59ef24d74dd36e6a7093c97a474cf9e315a7326028"}
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.526092 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6e4606b-d577-4d2d-abfe-b001356839b6-utilities\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.526163 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgnhq\" (UniqueName: \"kubernetes.io/projected/d6e4606b-d577-4d2d-abfe-b001356839b6-kube-api-access-xgnhq\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.526180 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6e4606b-d577-4d2d-abfe-b001356839b6-catalog-content\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.526719 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6e4606b-d577-4d2d-abfe-b001356839b6-catalog-content\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.527565 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6e4606b-d577-4d2d-abfe-b001356839b6-utilities\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.549730 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgnhq\" (UniqueName: \"kubernetes.io/projected/d6e4606b-d577-4d2d-abfe-b001356839b6-kube-api-access-xgnhq\") pod \"certified-operators-lhng2\" (UID: \"d6e4606b-d577-4d2d-abfe-b001356839b6\") " pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.588062 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vwfd7"]
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.589231 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.592556 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.599139 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwfd7"]
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.627514 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc701cb7-bf87-4c7a-b95d-c88ec018171c-utilities\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.627805 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5n4m\" (UniqueName: \"kubernetes.io/projected/fc701cb7-bf87-4c7a-b95d-c88ec018171c-kube-api-access-k5n4m\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.627830 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc701cb7-bf87-4c7a-b95d-c88ec018171c-catalog-content\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.729153 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5n4m\" (UniqueName: \"kubernetes.io/projected/fc701cb7-bf87-4c7a-b95d-c88ec018171c-kube-api-access-k5n4m\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.729199 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc701cb7-bf87-4c7a-b95d-c88ec018171c-catalog-content\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.729275 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc701cb7-bf87-4c7a-b95d-c88ec018171c-utilities\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.729750 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc701cb7-bf87-4c7a-b95d-c88ec018171c-catalog-content\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.729979 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc701cb7-bf87-4c7a-b95d-c88ec018171c-utilities\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.745469 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5n4m\" (UniqueName: \"kubernetes.io/projected/fc701cb7-bf87-4c7a-b95d-c88ec018171c-kube-api-access-k5n4m\") pod \"redhat-operators-vwfd7\" (UID: \"fc701cb7-bf87-4c7a-b95d-c88ec018171c\") " pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.768249 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lhng2"
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.931153 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lhng2"]
Nov 25 16:51:58 crc kubenswrapper[4812]: W1125 16:51:58.940647 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6e4606b_d577_4d2d_abfe_b001356839b6.slice/crio-ad94adf71f99a3d004565b59125b5cd234d0eb3d2c84571ca6e48aa3358f24f7 WatchSource:0}: Error finding container ad94adf71f99a3d004565b59125b5cd234d0eb3d2c84571ca6e48aa3358f24f7: Status 404 returned error can't find the container with id ad94adf71f99a3d004565b59125b5cd234d0eb3d2c84571ca6e48aa3358f24f7
Nov 25 16:51:58 crc kubenswrapper[4812]: I1125 16:51:58.940887 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vwfd7"
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.140854 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vwfd7"]
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.517450 4812 generic.go:334] "Generic (PLEG): container finished" podID="fc701cb7-bf87-4c7a-b95d-c88ec018171c" containerID="c2330bcc80a117a140079ce0151f570b64cd5ed2a2745e1c82b5d2d4b031c67a" exitCode=0
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.517518 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwfd7" event={"ID":"fc701cb7-bf87-4c7a-b95d-c88ec018171c","Type":"ContainerDied","Data":"c2330bcc80a117a140079ce0151f570b64cd5ed2a2745e1c82b5d2d4b031c67a"}
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.517560 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwfd7" event={"ID":"fc701cb7-bf87-4c7a-b95d-c88ec018171c","Type":"ContainerStarted","Data":"796cdb7c3a79f442457b9d636f54ae2ad03e5a2ae283693e01a88175e6f595cc"}
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.519587 4812 generic.go:334] "Generic (PLEG): container finished" podID="d893abf0-5f20-4874-ad11-48bd8a156929" containerID="75bb2efbbcd0369938defe59ef24d74dd36e6a7093c97a474cf9e315a7326028" exitCode=0
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.519634 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cgdkf" event={"ID":"d893abf0-5f20-4874-ad11-48bd8a156929","Type":"ContainerDied","Data":"75bb2efbbcd0369938defe59ef24d74dd36e6a7093c97a474cf9e315a7326028"}
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.524760 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s69c5" event={"ID":"5be2bd70-a890-4af3-a0fc-5a87851211f9","Type":"ContainerStarted","Data":"83ca09bd6d9cee9fa9573b9f20ff68c23bf53a75955c312aa8d84f991b55961d"}
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.528798 4812 generic.go:334] "Generic (PLEG): container finished" podID="d6e4606b-d577-4d2d-abfe-b001356839b6" containerID="b57de8800efa08b730c4b50c5e0642087a0bf9753f151eed09c05066eaf47af5" exitCode=0
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.528829 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhng2" event={"ID":"d6e4606b-d577-4d2d-abfe-b001356839b6","Type":"ContainerDied","Data":"b57de8800efa08b730c4b50c5e0642087a0bf9753f151eed09c05066eaf47af5"}
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.528847 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhng2" event={"ID":"d6e4606b-d577-4d2d-abfe-b001356839b6","Type":"ContainerStarted","Data":"ad94adf71f99a3d004565b59125b5cd234d0eb3d2c84571ca6e48aa3358f24f7"}
Nov 25 16:51:59 crc kubenswrapper[4812]: I1125 16:51:59.592908 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s69c5" podStartSLOduration=3.198184741 podStartE2EDuration="4.592888172s" podCreationTimestamp="2025-11-25 16:51:55 +0000 UTC" firstStartedPulling="2025-11-25 16:51:57.500899672 +0000 UTC m=+292.341041767" lastFinishedPulling="2025-11-25 16:51:58.895603103 +0000 UTC m=+293.735745198" observedRunningTime="2025-11-25 16:51:59.570551895 +0000 UTC m=+294.410693990" watchObservedRunningTime="2025-11-25 16:51:59.592888172 +0000 UTC m=+294.433030257"
Nov 25 16:52:00 crc kubenswrapper[4812]: I1125 16:52:00.538182 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cgdkf" event={"ID":"d893abf0-5f20-4874-ad11-48bd8a156929","Type":"ContainerStarted","Data":"27d7dac66cc79bf21460ff75d7bcf4d8486ec35bb21db672b4778d349e3546fa"}
Nov 25 16:52:00 crc kubenswrapper[4812]: I1125 16:52:00.541153 4812 generic.go:334] "Generic (PLEG): container finished" podID="d6e4606b-d577-4d2d-abfe-b001356839b6" containerID="0ea84083b6721179091f2b703530d2c28ba94e0ddf3832f858b159a8e117fee7" exitCode=0
Nov 25 16:52:00 crc kubenswrapper[4812]: I1125 16:52:00.541254 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhng2" event={"ID":"d6e4606b-d577-4d2d-abfe-b001356839b6","Type":"ContainerDied","Data":"0ea84083b6721179091f2b703530d2c28ba94e0ddf3832f858b159a8e117fee7"}
Nov 25 16:52:00 crc kubenswrapper[4812]: I1125 16:52:00.561950 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cgdkf" podStartSLOduration=2.035215968 podStartE2EDuration="4.561927164s" podCreationTimestamp="2025-11-25 16:51:56 +0000 UTC" firstStartedPulling="2025-11-25 16:51:57.495754261 +0000 UTC m=+292.335896356" lastFinishedPulling="2025-11-25 16:52:00.022465457 +0000 UTC m=+294.862607552" observedRunningTime="2025-11-25 16:52:00.558145173 +0000 UTC m=+295.398287288" watchObservedRunningTime="2025-11-25 16:52:00.561927164 +0000 UTC m=+295.402069259"
Nov 25 16:52:01 crc kubenswrapper[4812]: I1125 16:52:01.548588 4812 generic.go:334] "Generic (PLEG): container finished" podID="fc701cb7-bf87-4c7a-b95d-c88ec018171c" containerID="64b778bef7bc8f2caa6101bc44d0ccf8eab0d7d73d7eaba88295179c3ea8650b" exitCode=0
Nov 25 16:52:01 crc kubenswrapper[4812]: I1125 16:52:01.548660 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwfd7" event={"ID":"fc701cb7-bf87-4c7a-b95d-c88ec018171c","Type":"ContainerDied","Data":"64b778bef7bc8f2caa6101bc44d0ccf8eab0d7d73d7eaba88295179c3ea8650b"}
Nov 25 16:52:03 crc kubenswrapper[4812]: I1125 16:52:03.566735 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vwfd7" event={"ID":"fc701cb7-bf87-4c7a-b95d-c88ec018171c","Type":"ContainerStarted","Data":"662fd8727ff013ca2265ac4f55b9f3b354303b51560e27f03d02e1e3a34b781d"}
Nov 25 16:52:03 crc kubenswrapper[4812]: I1125 16:52:03.570325 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lhng2" event={"ID":"d6e4606b-d577-4d2d-abfe-b001356839b6","Type":"ContainerStarted","Data":"8b63cd05dd4648c46022b281f1d6c33c688b06e0e3a8585fd5dfb551da4a8c6f"}
Nov 25 16:52:03 crc kubenswrapper[4812]: I1125 16:52:03.583851 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vwfd7" podStartSLOduration=2.98639861 podStartE2EDuration="5.583834286s" podCreationTimestamp="2025-11-25 16:51:58 +0000 UTC" firstStartedPulling="2025-11-25 16:51:59.519284038 +0000 UTC m=+294.359426133" lastFinishedPulling="2025-11-25 16:52:02.116719674 +0000 UTC m=+296.956861809" observedRunningTime="2025-11-25 16:52:03.581423754 +0000 UTC m=+298.421565859" watchObservedRunningTime="2025-11-25 16:52:03.583834286 +0000 UTC m=+298.423976381"
Nov 25 16:52:03 crc kubenswrapper[4812]: I1125 16:52:03.600030 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lhng2" podStartSLOduration=3.5653896080000003 podStartE2EDuration="5.600000111s" podCreationTimestamp="2025-11-25 16:51:58 +0000 UTC" firstStartedPulling="2025-11-25 16:51:59.529935771 +0000 UTC m=+294.370077866" lastFinishedPulling="2025-11-25 16:52:01.564546274 +0000 UTC m=+296.404688369" observedRunningTime="2025-11-25 16:52:03.597357033 +0000 UTC m=+298.437499138" watchObservedRunningTime="2025-11-25 16:52:03.600000111 +0000 UTC m=+298.440142196"
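In the pod_startup_latency_tracker entries above, podStartSLOduration matches podStartE2EDuration minus the image-pull window (lastFinishedPulling - firstStartedPulling): for redhat-marketplace-s69c5, 4.592888172s - 1.394703431s = 3.198184741s, and for certified-operators-lhng2, 5.600000111s - 2.034610503s = 3.565389608s. A small Go sketch that recomputes the s69c5 numbers follows; the subtraction rule is inferred from these entries rather than quoted from kubelet source.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the redhat-marketplace-s69c5 entry above.
	layout := "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-25 16:51:55 +0000 UTC")
	firstPull, _ := time.Parse(layout, "2025-11-25 16:51:57.500899672 +0000 UTC")
	lastPull, _ := time.Parse(layout, "2025-11-25 16:51:58.895603103 +0000 UTC")
	running, _ := time.Parse(layout, "2025-11-25 16:51:59.592888172 +0000 UTC")

	e2e := running.Sub(created)          // 4.592888172s = podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 3.198184741s = podStartSLOduration
	fmt.Println(e2e, slo)
}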
pod="openshift-marketplace/certified-operators-lhng2" podStartSLOduration=3.5653896080000003 podStartE2EDuration="5.600000111s" podCreationTimestamp="2025-11-25 16:51:58 +0000 UTC" firstStartedPulling="2025-11-25 16:51:59.529935771 +0000 UTC m=+294.370077866" lastFinishedPulling="2025-11-25 16:52:01.564546274 +0000 UTC m=+296.404688369" observedRunningTime="2025-11-25 16:52:03.597357033 +0000 UTC m=+298.437499138" watchObservedRunningTime="2025-11-25 16:52:03.600000111 +0000 UTC m=+298.440142196" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.320029 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.320375 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.370663 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.540705 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.540851 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.577556 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.630230 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cgdkf" Nov 25 16:52:06 crc kubenswrapper[4812]: I1125 16:52:06.631646 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s69c5" Nov 25 16:52:08 crc kubenswrapper[4812]: I1125 16:52:08.768748 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lhng2" Nov 25 16:52:08 crc kubenswrapper[4812]: I1125 16:52:08.769133 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lhng2" Nov 25 16:52:08 crc kubenswrapper[4812]: I1125 16:52:08.807661 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lhng2" Nov 25 16:52:08 crc kubenswrapper[4812]: I1125 16:52:08.941869 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vwfd7" Nov 25 16:52:08 crc kubenswrapper[4812]: I1125 16:52:08.941934 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vwfd7" Nov 25 16:52:09 crc kubenswrapper[4812]: I1125 16:52:09.006216 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vwfd7" Nov 25 16:52:09 crc kubenswrapper[4812]: I1125 16:52:09.648127 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vwfd7" Nov 25 16:52:09 crc kubenswrapper[4812]: I1125 16:52:09.651892 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lhng2" Nov 25 16:53:27 crc 
Nov 25 16:53:27 crc kubenswrapper[4812]: I1125 16:53:27.332960 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:53:27 crc kubenswrapper[4812]: I1125 16:53:27.333918 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:53:57 crc kubenswrapper[4812]: I1125 16:53:57.332612 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:53:57 crc kubenswrapper[4812]: I1125 16:53:57.333208 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.332458 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.333071 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.333125 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx"
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.333711 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6a4a8e154a6e12db11fb5ac4d932b68107c12d9d0d7b66465ae6e941c31a59a0"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.333774 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://6a4a8e154a6e12db11fb5ac4d932b68107c12d9d0d7b66465ae6e941c31a59a0" gracePeriod=600
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.609987 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="6a4a8e154a6e12db11fb5ac4d932b68107c12d9d0d7b66465ae6e941c31a59a0" exitCode=0
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.610198 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"6a4a8e154a6e12db11fb5ac4d932b68107c12d9d0d7b66465ae6e941c31a59a0"}
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.610223 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"3b26e577296869e8ae3c303744ab388e7654f5620c398425aadc8669e43297d8"}
Nov 25 16:54:27 crc kubenswrapper[4812]: I1125 16:54:27.610237 4812 scope.go:117] "RemoveContainer" containerID="e00a717540f4d5f1500c4463c6ca6a0e8e999165da3083679094be548a442ca6"
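The 16:53:27 through 16:54:27 entries above show three consecutive liveness probe failures against http://127.0.0.1:8798/health (connection refused), after which the kubelet kills machine-config-daemon with gracePeriod=600 and starts a replacement container. A minimal Go stand-in for that HTTP probe follows; the URL comes from the log, while the 1s timeout and the 200-399 success range are assumptions, since the pod's actual probe spec is not part of this log.

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Same GET the kubelet prober issues; HTTP probes treat 2xx/3xx as success.
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// The state recorded above: dial tcp 127.0.0.1:8798: connect: connection refused.
		fmt.Println("probe failure:", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("probe success:", resp.Status)
	} else {
		fmt.Println("probe failure:", resp.Status)
	}
}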
Nov 25 16:54:30 crc kubenswrapper[4812]: I1125 16:54:30.871834 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-s6n79"]
Nov 25 16:54:30 crc kubenswrapper[4812]: I1125 16:54:30.873020 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:30 crc kubenswrapper[4812]: I1125 16:54:30.894642 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-s6n79"]
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.039903 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/30639753-9701-4f66-8b5c-cd2dc5e6d373-registry-certificates\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.039966 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zp2l\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-kube-api-access-6zp2l\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.039997 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/30639753-9701-4f66-8b5c-cd2dc5e6d373-ca-trust-extracted\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.040019 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-bound-sa-token\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.040046 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-registry-tls\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.040095 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.040121 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30639753-9701-4f66-8b5c-cd2dc5e6d373-trusted-ca\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.040148 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/30639753-9701-4f66-8b5c-cd2dc5e6d373-installation-pull-secrets\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.057981 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141100 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zp2l\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-kube-api-access-6zp2l\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141155 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/30639753-9701-4f66-8b5c-cd2dc5e6d373-ca-trust-extracted\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141177 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-bound-sa-token\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141196 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-registry-tls\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141230 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30639753-9701-4f66-8b5c-cd2dc5e6d373-trusted-ca\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141250 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/30639753-9701-4f66-8b5c-cd2dc5e6d373-installation-pull-secrets\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141295 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/30639753-9701-4f66-8b5c-cd2dc5e6d373-registry-certificates\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.141746 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/30639753-9701-4f66-8b5c-cd2dc5e6d373-ca-trust-extracted\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.142548 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/30639753-9701-4f66-8b5c-cd2dc5e6d373-registry-certificates\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.142573 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/30639753-9701-4f66-8b5c-cd2dc5e6d373-trusted-ca\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.148211 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/30639753-9701-4f66-8b5c-cd2dc5e6d373-installation-pull-secrets\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.149427 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-registry-tls\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.158438 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-bound-sa-token\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.159088 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zp2l\" (UniqueName: \"kubernetes.io/projected/30639753-9701-4f66-8b5c-cd2dc5e6d373-kube-api-access-6zp2l\") pod \"image-registry-66df7c8f76-s6n79\" (UID: \"30639753-9701-4f66-8b5c-cd2dc5e6d373\") " pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.187332 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.367840 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-s6n79"]
Nov 25 16:54:31 crc kubenswrapper[4812]: W1125 16:54:31.377287 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30639753_9701_4f66_8b5c_cd2dc5e6d373.slice/crio-338f39b36e71571000dd9988016188a40c936a52b357f2eef3e7c1c8e8fb4427 WatchSource:0}: Error finding container 338f39b36e71571000dd9988016188a40c936a52b357f2eef3e7c1c8e8fb4427: Status 404 returned error can't find the container with id 338f39b36e71571000dd9988016188a40c936a52b357f2eef3e7c1c8e8fb4427
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.637109 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79" event={"ID":"30639753-9701-4f66-8b5c-cd2dc5e6d373","Type":"ContainerStarted","Data":"da4c610fe64cb2ef1eac8fb63567133f7afe32f7dd6f3ce22b42943b3410d788"}
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.637367 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79" event={"ID":"30639753-9701-4f66-8b5c-cd2dc5e6d373","Type":"ContainerStarted","Data":"338f39b36e71571000dd9988016188a40c936a52b357f2eef3e7c1c8e8fb4427"}
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.637384 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:31 crc kubenswrapper[4812]: I1125 16:54:31.657778 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79" podStartSLOduration=1.657756322 podStartE2EDuration="1.657756322s" podCreationTimestamp="2025-11-25 16:54:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:54:31.654030307 +0000 UTC m=+446.494172412" watchObservedRunningTime="2025-11-25 16:54:31.657756322 +0000 UTC m=+446.497898417"
Nov 25 16:54:51 crc kubenswrapper[4812]: I1125 16:54:51.193889 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-s6n79"
Nov 25 16:54:51 crc kubenswrapper[4812]: I1125 16:54:51.239095 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-h6c4h"]
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.272360 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" podUID="3c23f6ed-bcde-4571-b631-c90ce20d9348" containerName="registry" containerID="cri-o://e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e" gracePeriod=30
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.648878 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.740642 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3c23f6ed-bcde-4571-b631-c90ce20d9348-ca-trust-extracted\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.740695 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-tls\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.740718 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-trusted-ca\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.740753 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-bound-sa-token\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.740773 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-certificates\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.740798 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3c23f6ed-bcde-4571-b631-c90ce20d9348-installation-pull-secrets\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.741016 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.741039 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhsns\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-kube-api-access-fhsns\") pod \"3c23f6ed-bcde-4571-b631-c90ce20d9348\" (UID: \"3c23f6ed-bcde-4571-b631-c90ce20d9348\") "
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.742104 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.742125 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.747005 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.747319 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c23f6ed-bcde-4571-b631-c90ce20d9348-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.748747 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-kube-api-access-fhsns" (OuterVolumeSpecName: "kube-api-access-fhsns") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "kube-api-access-fhsns". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.749142 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.753962 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.765428 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c23f6ed-bcde-4571-b631-c90ce20d9348-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3c23f6ed-bcde-4571-b631-c90ce20d9348" (UID: "3c23f6ed-bcde-4571-b631-c90ce20d9348"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842135 4812 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842180 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842191 4812 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842204 4812 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3c23f6ed-bcde-4571-b631-c90ce20d9348-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842219 4812 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3c23f6ed-bcde-4571-b631-c90ce20d9348-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842230 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhsns\" (UniqueName: \"kubernetes.io/projected/3c23f6ed-bcde-4571-b631-c90ce20d9348-kube-api-access-fhsns\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.842240 4812 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3c23f6ed-bcde-4571-b631-c90ce20d9348-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.873122 4812 generic.go:334] "Generic (PLEG): container finished" podID="3c23f6ed-bcde-4571-b631-c90ce20d9348" containerID="e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e" exitCode=0
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.873185 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" event={"ID":"3c23f6ed-bcde-4571-b631-c90ce20d9348","Type":"ContainerDied","Data":"e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e"}
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.873238 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h" event={"ID":"3c23f6ed-bcde-4571-b631-c90ce20d9348","Type":"ContainerDied","Data":"ac5047a5a8814bb6c280a6a51c0d9c55acf2cffab53c613aaf234a99ba289ba7"}
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.873255 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-h6c4h"
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.873268 4812 scope.go:117] "RemoveContainer" containerID="e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e"
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.888309 4812 scope.go:117] "RemoveContainer" containerID="e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e"
Nov 25 16:55:16 crc kubenswrapper[4812]: E1125 16:55:16.888947 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e\": container with ID starting with e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e not found: ID does not exist" containerID="e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e"
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.889012 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e"} err="failed to get container status \"e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e\": rpc error: code = NotFound desc = could not find container \"e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e\": container with ID starting with e346ab79554d0cac3f20a6f0afce29109a420164ac164470405fd4689adaf40e not found: ID does not exist"
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.913215 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-h6c4h"]
Nov 25 16:55:16 crc kubenswrapper[4812]: I1125 16:55:16.918335 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-h6c4h"]
Nov 25 16:55:17 crc kubenswrapper[4812]: I1125 16:55:17.837602 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c23f6ed-bcde-4571-b631-c90ce20d9348" path="/var/lib/kubelet/pods/3c23f6ed-bcde-4571-b631-c90ce20d9348/volumes"
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.011468 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-hrgwr"] Nov 25 16:57:24 crc kubenswrapper[4812]: E1125 16:57:24.012408 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c23f6ed-bcde-4571-b631-c90ce20d9348" containerName="registry" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.012427 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c23f6ed-bcde-4571-b631-c90ce20d9348" containerName="registry" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.012589 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c23f6ed-bcde-4571-b631-c90ce20d9348" containerName="registry" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.012988 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-hblh8"] Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.013196 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.013761 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-hblh8" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.017013 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.017216 4812 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-hrdjk" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.017322 4812 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-cqj5l" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.019039 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.022901 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cz62v"] Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.023467 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.024662 4812 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-n6zbp" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.027979 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-hblh8"] Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.031926 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-hrgwr"] Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.037091 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cz62v"] Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.151711 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crz72\" (UniqueName: \"kubernetes.io/projected/1caef332-0895-4118-9bd4-58f163a66c61-kube-api-access-crz72\") pod \"cert-manager-5b446d88c5-hblh8\" (UID: \"1caef332-0895-4118-9bd4-58f163a66c61\") " pod="cert-manager/cert-manager-5b446d88c5-hblh8" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.151761 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg6fh\" (UniqueName: \"kubernetes.io/projected/858a2059-d271-4acb-a19f-2ceeba425ad3-kube-api-access-dg6fh\") pod \"cert-manager-webhook-5655c58dd6-cz62v\" (UID: \"858a2059-d271-4acb-a19f-2ceeba425ad3\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.151815 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgsr4\" (UniqueName: \"kubernetes.io/projected/2f63797d-85cb-42e8-afb2-de9d3324e853-kube-api-access-cgsr4\") pod \"cert-manager-cainjector-7f985d654d-hrgwr\" (UID: \"2f63797d-85cb-42e8-afb2-de9d3324e853\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.253445 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crz72\" (UniqueName: \"kubernetes.io/projected/1caef332-0895-4118-9bd4-58f163a66c61-kube-api-access-crz72\") pod \"cert-manager-5b446d88c5-hblh8\" (UID: \"1caef332-0895-4118-9bd4-58f163a66c61\") " pod="cert-manager/cert-manager-5b446d88c5-hblh8" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.253593 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg6fh\" (UniqueName: \"kubernetes.io/projected/858a2059-d271-4acb-a19f-2ceeba425ad3-kube-api-access-dg6fh\") pod \"cert-manager-webhook-5655c58dd6-cz62v\" (UID: \"858a2059-d271-4acb-a19f-2ceeba425ad3\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.253729 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgsr4\" (UniqueName: \"kubernetes.io/projected/2f63797d-85cb-42e8-afb2-de9d3324e853-kube-api-access-cgsr4\") pod \"cert-manager-cainjector-7f985d654d-hrgwr\" (UID: \"2f63797d-85cb-42e8-afb2-de9d3324e853\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.272155 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgsr4\" (UniqueName: 
\"kubernetes.io/projected/2f63797d-85cb-42e8-afb2-de9d3324e853-kube-api-access-cgsr4\") pod \"cert-manager-cainjector-7f985d654d-hrgwr\" (UID: \"2f63797d-85cb-42e8-afb2-de9d3324e853\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.272159 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crz72\" (UniqueName: \"kubernetes.io/projected/1caef332-0895-4118-9bd4-58f163a66c61-kube-api-access-crz72\") pod \"cert-manager-5b446d88c5-hblh8\" (UID: \"1caef332-0895-4118-9bd4-58f163a66c61\") " pod="cert-manager/cert-manager-5b446d88c5-hblh8" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.276430 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg6fh\" (UniqueName: \"kubernetes.io/projected/858a2059-d271-4acb-a19f-2ceeba425ad3-kube-api-access-dg6fh\") pod \"cert-manager-webhook-5655c58dd6-cz62v\" (UID: \"858a2059-d271-4acb-a19f-2ceeba425ad3\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.336805 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.351495 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-hblh8" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.362741 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.533962 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-hrgwr"] Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.544194 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.559214 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-hblh8"] Nov 25 16:57:24 crc kubenswrapper[4812]: W1125 16:57:24.568424 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1caef332_0895_4118_9bd4_58f163a66c61.slice/crio-5708f36e59471cf2d5197b1b8d29736958f92cfa8aa2cb8ef7eee1f964fabfb8 WatchSource:0}: Error finding container 5708f36e59471cf2d5197b1b8d29736958f92cfa8aa2cb8ef7eee1f964fabfb8: Status 404 returned error can't find the container with id 5708f36e59471cf2d5197b1b8d29736958f92cfa8aa2cb8ef7eee1f964fabfb8 Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.574220 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" event={"ID":"2f63797d-85cb-42e8-afb2-de9d3324e853","Type":"ContainerStarted","Data":"1ae64cf2cc036ab92be6ac567343e915b891fdd2af3a9e7b93349f2c89d2a42c"} Nov 25 16:57:24 crc kubenswrapper[4812]: I1125 16:57:24.597510 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cz62v"] Nov 25 16:57:24 crc kubenswrapper[4812]: W1125 16:57:24.601234 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod858a2059_d271_4acb_a19f_2ceeba425ad3.slice/crio-a24107f59b431a10c8cf84f85ee6f2f30b2262de9cbf96db3695e2c3008fd2f8 WatchSource:0}: Error finding container 
a24107f59b431a10c8cf84f85ee6f2f30b2262de9cbf96db3695e2c3008fd2f8: Status 404 returned error can't find the container with id a24107f59b431a10c8cf84f85ee6f2f30b2262de9cbf96db3695e2c3008fd2f8 Nov 25 16:57:25 crc kubenswrapper[4812]: I1125 16:57:25.584955 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" event={"ID":"858a2059-d271-4acb-a19f-2ceeba425ad3","Type":"ContainerStarted","Data":"a24107f59b431a10c8cf84f85ee6f2f30b2262de9cbf96db3695e2c3008fd2f8"} Nov 25 16:57:25 crc kubenswrapper[4812]: I1125 16:57:25.587635 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-hblh8" event={"ID":"1caef332-0895-4118-9bd4-58f163a66c61","Type":"ContainerStarted","Data":"5708f36e59471cf2d5197b1b8d29736958f92cfa8aa2cb8ef7eee1f964fabfb8"} Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.332837 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.333315 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.333423 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.334086 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3b26e577296869e8ae3c303744ab388e7654f5620c398425aadc8669e43297d8"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.334151 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://3b26e577296869e8ae3c303744ab388e7654f5620c398425aadc8669e43297d8" gracePeriod=600 Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.598443 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-hblh8" event={"ID":"1caef332-0895-4118-9bd4-58f163a66c61","Type":"ContainerStarted","Data":"0a0b2f42efb1f2bf465435334766cc1c18e3697c7310779db3e7c97af2c806b1"} Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.600762 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="3b26e577296869e8ae3c303744ab388e7654f5620c398425aadc8669e43297d8" exitCode=0 Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.600804 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"3b26e577296869e8ae3c303744ab388e7654f5620c398425aadc8669e43297d8"} Nov 25 16:57:27 crc 
kubenswrapper[4812]: I1125 16:57:27.600834 4812 scope.go:117] "RemoveContainer" containerID="6a4a8e154a6e12db11fb5ac4d932b68107c12d9d0d7b66465ae6e941c31a59a0" Nov 25 16:57:27 crc kubenswrapper[4812]: I1125 16:57:27.612840 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-hblh8" podStartSLOduration=2.486372562 podStartE2EDuration="4.612816419s" podCreationTimestamp="2025-11-25 16:57:23 +0000 UTC" firstStartedPulling="2025-11-25 16:57:24.574009845 +0000 UTC m=+619.414151940" lastFinishedPulling="2025-11-25 16:57:26.700453702 +0000 UTC m=+621.540595797" observedRunningTime="2025-11-25 16:57:27.610097533 +0000 UTC m=+622.450239628" watchObservedRunningTime="2025-11-25 16:57:27.612816419 +0000 UTC m=+622.452958514" Nov 25 16:57:29 crc kubenswrapper[4812]: I1125 16:57:29.613272 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" event={"ID":"2f63797d-85cb-42e8-afb2-de9d3324e853","Type":"ContainerStarted","Data":"0db8045d90235228e914993e8638bd4807706388ecbe2890bea78996f553c610"} Nov 25 16:57:29 crc kubenswrapper[4812]: I1125 16:57:29.616892 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"0916093b8e73989d7d0a8f475c7e60ef04e5cae4ae347d150e81560d1068b4c0"} Nov 25 16:57:29 crc kubenswrapper[4812]: I1125 16:57:29.618217 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" event={"ID":"858a2059-d271-4acb-a19f-2ceeba425ad3","Type":"ContainerStarted","Data":"2612748d43f8431adea72b2f10da9f1ae7adf9539a08207b8a5825bae1b597fa"} Nov 25 16:57:29 crc kubenswrapper[4812]: I1125 16:57:29.618414 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:29 crc kubenswrapper[4812]: I1125 16:57:29.628553 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-hrgwr" podStartSLOduration=2.483566004 podStartE2EDuration="6.628511268s" podCreationTimestamp="2025-11-25 16:57:23 +0000 UTC" firstStartedPulling="2025-11-25 16:57:24.543998315 +0000 UTC m=+619.384140410" lastFinishedPulling="2025-11-25 16:57:28.688943579 +0000 UTC m=+623.529085674" observedRunningTime="2025-11-25 16:57:29.625523924 +0000 UTC m=+624.465666019" watchObservedRunningTime="2025-11-25 16:57:29.628511268 +0000 UTC m=+624.468653383" Nov 25 16:57:29 crc kubenswrapper[4812]: I1125 16:57:29.649912 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" podStartSLOduration=1.555060504 podStartE2EDuration="5.649889186s" podCreationTimestamp="2025-11-25 16:57:24 +0000 UTC" firstStartedPulling="2025-11-25 16:57:24.60313248 +0000 UTC m=+619.443274565" lastFinishedPulling="2025-11-25 16:57:28.697961152 +0000 UTC m=+623.538103247" observedRunningTime="2025-11-25 16:57:29.645799622 +0000 UTC m=+624.485941737" watchObservedRunningTime="2025-11-25 16:57:29.649889186 +0000 UTC m=+624.490031281" Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.365414 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-cz62v" Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.672970 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-ovn-kubernetes/ovnkube-node-hwqsk"] Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.673967 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-controller" containerID="cri-o://93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.674104 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-node" containerID="cri-o://6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.674113 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.674177 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="northd" containerID="cri-o://37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.674264 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="sbdb" containerID="cri-o://e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.674246 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-acl-logging" containerID="cri-o://8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.674012 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="nbdb" containerID="cri-o://c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f" gracePeriod=30 Nov 25 16:57:34 crc kubenswrapper[4812]: I1125 16:57:34.704880 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" containerID="cri-o://dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d" gracePeriod=30 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.032745 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/3.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.036472 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovn-acl-logging/0.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.037085 4812 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovn-controller/0.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.037710 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075779 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-kubelet\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075833 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovn-node-metrics-cert\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075850 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-netns\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075896 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-ovn\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075915 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-etc-openvswitch\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075933 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-systemd-units\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075957 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-env-overrides\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075981 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-config\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076001 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-script-lib\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: 
I1125 16:57:35.076017 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-slash\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076030 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-openvswitch\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.075999 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076044 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076046 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076107 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-systemd\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076144 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-bin\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076186 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-node-log\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076246 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmtpr\" (UniqueName: \"kubernetes.io/projected/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-kube-api-access-gmtpr\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076281 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-var-lib-openvswitch\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076314 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-netd\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076348 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-ovn-kubernetes\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076379 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-log-socket\") pod \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\" (UID: \"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9\") " Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076072 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076100 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076428 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076446 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-slash" (OuterVolumeSpecName: "host-slash") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076461 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076519 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076564 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-node-log" (OuterVolumeSpecName: "node-log") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076717 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076745 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076766 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076782 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076882 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.076909 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-log-socket" (OuterVolumeSpecName: "log-socket") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077036 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077059 4812 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077082 4812 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077099 4812 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077112 4812 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077117 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077124 4812 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077166 4812 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077181 4812 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077193 4812 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077208 4812 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077220 4812 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077232 4812 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077243 4812 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077254 4812 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077265 4812 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.077276 4812 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.085333 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-kube-api-access-gmtpr" (OuterVolumeSpecName: "kube-api-access-gmtpr") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "kube-api-access-gmtpr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.085841 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.091987 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wxgkb"] Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092190 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092208 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092218 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092224 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092231 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="nbdb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092238 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="nbdb" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092244 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="sbdb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092250 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="sbdb" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092259 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092265 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092272 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-node" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092279 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-node" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092287 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-acl-logging" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092293 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-acl-logging" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092301 4812 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kubecfg-setup" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092307 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kubecfg-setup" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092319 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="northd" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092324 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="northd" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092333 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092339 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092348 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092355 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092469 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092485 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092495 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="kube-rbac-proxy-node" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092502 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-acl-logging" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092511 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092519 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="nbdb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092526 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092590 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="northd" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092600 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovn-controller" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092608 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="sbdb" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 
16:57:35.092695 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092703 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092812 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller"
Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.092910 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.092920 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.093023 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerName="ovnkube-controller"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.094556 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.098007 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" (UID: "bc4dc9ff-11a1-4151-91f0-3ff83020b3b9"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.177929 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-ovnkube-script-lib\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.177991 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-slash\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178018 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-systemd\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178038 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krv8r\" (UniqueName: \"kubernetes.io/projected/7ac84eb6-4c92-4701-81d1-babf3286e867-kube-api-access-krv8r\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178061 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-ovn\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178105 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-ovnkube-config\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178128 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-kubelet\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178153 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-cni-netd\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178175 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-env-overrides\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178195 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-log-socket\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178216 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-cni-bin\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178237 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178264 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178289 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7ac84eb6-4c92-4701-81d1-babf3286e867-ovn-node-metrics-cert\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178316 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-node-log\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178349 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-netns\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178381 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-ovn-kubernetes\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178411 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-systemd-units\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178442 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-etc-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178468 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-var-lib-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178512 4812 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178545 4812 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178558 4812 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178569 4812 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.178580 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmtpr\" (UniqueName: \"kubernetes.io/projected/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9-kube-api-access-gmtpr\") on node \"crc\" DevicePath \"\""
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279644 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-env-overrides\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279700 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-log-socket\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279716 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-cni-bin\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279732 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279751 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279769 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7ac84eb6-4c92-4701-81d1-babf3286e867-ovn-node-metrics-cert\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279798 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-node-log\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279820 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-netns\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb"
(UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-netns\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279830 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-log-socket\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279889 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-node-log\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279898 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279919 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-netns\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279844 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-ovn-kubernetes\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279960 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-run-ovn-kubernetes\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.279965 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-cni-bin\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280015 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-systemd-units\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280041 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-systemd-units\") 
pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280069 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280145 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-etc-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280185 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-var-lib-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280237 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-ovnkube-script-lib\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280269 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-slash\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280296 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-systemd\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280325 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krv8r\" (UniqueName: \"kubernetes.io/projected/7ac84eb6-4c92-4701-81d1-babf3286e867-kube-api-access-krv8r\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280369 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-ovn\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280509 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-systemd\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280552 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-slash\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280565 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-ovnkube-config\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280584 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-var-lib-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280596 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-kubelet\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280621 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-run-ovn\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280593 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-etc-openvswitch\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280628 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-kubelet\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280654 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-env-overrides\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280713 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-cni-netd\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280763 4812 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7ac84eb6-4c92-4701-81d1-babf3286e867-host-cni-netd\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.280990 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-ovnkube-script-lib\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.281167 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7ac84eb6-4c92-4701-81d1-babf3286e867-ovnkube-config\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.283422 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7ac84eb6-4c92-4701-81d1-babf3286e867-ovn-node-metrics-cert\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.294897 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krv8r\" (UniqueName: \"kubernetes.io/projected/7ac84eb6-4c92-4701-81d1-babf3286e867-kube-api-access-krv8r\") pod \"ovnkube-node-wxgkb\" (UID: \"7ac84eb6-4c92-4701-81d1-babf3286e867\") " pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.412259 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:35 crc kubenswrapper[4812]: W1125 16:57:35.433773 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ac84eb6_4c92_4701_81d1_babf3286e867.slice/crio-e9d67b1bc0c54e9a421ef42269c229714ade900e414f323ee21e4a12c2fa9b35 WatchSource:0}: Error finding container e9d67b1bc0c54e9a421ef42269c229714ade900e414f323ee21e4a12c2fa9b35: Status 404 returned error can't find the container with id e9d67b1bc0c54e9a421ef42269c229714ade900e414f323ee21e4a12c2fa9b35 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.652882 4812 generic.go:334] "Generic (PLEG): container finished" podID="7ac84eb6-4c92-4701-81d1-babf3286e867" containerID="490ef33aa6acd7c5db69e36c5ebc686539c26fb808b3430bb19d22bf84fff747" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.652973 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerDied","Data":"490ef33aa6acd7c5db69e36c5ebc686539c26fb808b3430bb19d22bf84fff747"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.653033 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"e9d67b1bc0c54e9a421ef42269c229714ade900e414f323ee21e4a12c2fa9b35"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.655382 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/2.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.656154 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/1.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.656229 4812 generic.go:334] "Generic (PLEG): container finished" podID="3a156756-3629-4bed-8de0-1019226b7f04" containerID="7b60d73868c8041ba2714a8ca55f1c992e9dc9254dd14cf1495b4db1e4dad249" exitCode=2 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.656342 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerDied","Data":"7b60d73868c8041ba2714a8ca55f1c992e9dc9254dd14cf1495b4db1e4dad249"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.656412 4812 scope.go:117] "RemoveContainer" containerID="1eaf67b5a5abeace1075cd58bc8f217c42feab58eddaa10ddf6969b850c3f9f3" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.658168 4812 scope.go:117] "RemoveContainer" containerID="7b60d73868c8041ba2714a8ca55f1c992e9dc9254dd14cf1495b4db1e4dad249" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.658381 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-m7ndd_openshift-multus(3a156756-3629-4bed-8de0-1019226b7f04)\"" pod="openshift-multus/multus-m7ndd" podUID="3a156756-3629-4bed-8de0-1019226b7f04" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.658915 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovnkube-controller/3.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.662777 
4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovn-acl-logging/0.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.663515 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hwqsk_bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/ovn-controller/0.log" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664089 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664123 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664134 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664143 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664151 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664158 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" exitCode=0 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664167 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" exitCode=143 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664174 4812 generic.go:334] "Generic (PLEG): container finished" podID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" exitCode=143 Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664179 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664190 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664244 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664266 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664278 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664292 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664308 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664320 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664334 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664342 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664349 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664355 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664362 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664369 4812 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664375 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664382 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664388 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664397 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664408 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664416 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664423 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664429 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664436 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664442 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664449 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664458 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664466 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664473 4812 
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664482 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664492 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664499 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664511 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664518 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664525 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664549 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664556 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664562 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664569 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664575 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664585 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hwqsk" event={"ID":"bc4dc9ff-11a1-4151-91f0-3ff83020b3b9","Type":"ContainerDied","Data":"c5dd744dc9a8b03cba08022a22ca248c94ef83344b70f817ed416332dbf1a2aa"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664597 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664605 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664612 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664619 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664626 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664633 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664642 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664650 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664657 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.664664 4812 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"}
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.688835 4812 scope.go:117] "RemoveContainer" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.717990 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.731651 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hwqsk"]
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.736501 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hwqsk"]
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.753928 4812 scope.go:117] "RemoveContainer" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.769342 4812 scope.go:117] "RemoveContainer" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.784547 4812 scope.go:117] "RemoveContainer" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.833756 4812 scope.go:117] "RemoveContainer" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.840725 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc4dc9ff-11a1-4151-91f0-3ff83020b3b9" path="/var/lib/kubelet/pods/bc4dc9ff-11a1-4151-91f0-3ff83020b3b9/volumes"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.858128 4812 scope.go:117] "RemoveContainer" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.874232 4812 scope.go:117] "RemoveContainer" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.887904 4812 scope.go:117] "RemoveContainer" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.903093 4812 scope.go:117] "RemoveContainer" containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.920370 4812 scope.go:117] "RemoveContainer" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"
Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.920801 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": container with ID starting with dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d not found: ID does not exist" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.920831 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} err="failed to get container status \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": rpc error: code = NotFound desc = could not find container \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": container with ID starting with dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d not found: ID does not exist"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.920856 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"
Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.921068 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": container with ID starting with 94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a not found: ID does not exist" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921089 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} err="failed to get container status \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": rpc error: code = NotFound desc = could not find container \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": container with ID starting with 94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a not found: ID does not exist"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921102 4812 scope.go:117] "RemoveContainer" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"
Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.921299 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": container with ID starting with e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6 not found: ID does not exist" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921318 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} err="failed to get container status \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": rpc error: code = NotFound desc = could not find container \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": container with ID starting with e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6 not found: ID does not exist"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921329 4812 scope.go:117] "RemoveContainer" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"
Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.921521 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": container with ID starting with c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f not found: ID does not exist" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921565 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} err="failed to get container status \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": rpc error: code = NotFound desc = could not find container \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": container with ID starting with c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f not found: ID does not exist"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921576 4812 scope.go:117] "RemoveContainer" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"
Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.921787 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": container with ID starting with 37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab not found: ID does not exist" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"
Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921808 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} err="failed to get container status \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": rpc error: code = NotFound desc = could not find container \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": container with ID starting with 37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab not found: ID does not exist"
\"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": container with ID starting with 37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.921820 4812 scope.go:117] "RemoveContainer" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.922004 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": container with ID starting with ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d not found: ID does not exist" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922025 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} err="failed to get container status \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": rpc error: code = NotFound desc = could not find container \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": container with ID starting with ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922036 4812 scope.go:117] "RemoveContainer" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.922228 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": container with ID starting with 6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa not found: ID does not exist" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922249 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} err="failed to get container status \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": rpc error: code = NotFound desc = could not find container \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": container with ID starting with 6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922262 4812 scope.go:117] "RemoveContainer" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.922461 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": container with ID starting with 8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd not found: ID does not exist" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922482 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} 
err="failed to get container status \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": rpc error: code = NotFound desc = could not find container \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": container with ID starting with 8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922495 4812 scope.go:117] "RemoveContainer" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.922706 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": container with ID starting with 93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e not found: ID does not exist" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922734 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} err="failed to get container status \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": rpc error: code = NotFound desc = could not find container \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": container with ID starting with 93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922750 4812 scope.go:117] "RemoveContainer" containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740" Nov 25 16:57:35 crc kubenswrapper[4812]: E1125 16:57:35.922942 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": container with ID starting with a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740 not found: ID does not exist" containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922966 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"} err="failed to get container status \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": rpc error: code = NotFound desc = could not find container \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": container with ID starting with a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740 not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.922984 4812 scope.go:117] "RemoveContainer" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.923196 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} err="failed to get container status \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": rpc error: code = NotFound desc = could not find container \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": container with ID starting with 
dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.923212 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.923408 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} err="failed to get container status \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": rpc error: code = NotFound desc = could not find container \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": container with ID starting with 94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.923434 4812 scope.go:117] "RemoveContainer" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.923842 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} err="failed to get container status \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": rpc error: code = NotFound desc = could not find container \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": container with ID starting with e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6 not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.923863 4812 scope.go:117] "RemoveContainer" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924157 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} err="failed to get container status \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": rpc error: code = NotFound desc = could not find container \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": container with ID starting with c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924176 4812 scope.go:117] "RemoveContainer" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924430 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} err="failed to get container status \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": rpc error: code = NotFound desc = could not find container \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": container with ID starting with 37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924448 4812 scope.go:117] "RemoveContainer" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924682 4812 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} err="failed to get container status \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": rpc error: code = NotFound desc = could not find container \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": container with ID starting with ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924697 4812 scope.go:117] "RemoveContainer" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924967 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} err="failed to get container status \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": rpc error: code = NotFound desc = could not find container \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": container with ID starting with 6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.924987 4812 scope.go:117] "RemoveContainer" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.925187 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} err="failed to get container status \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": rpc error: code = NotFound desc = could not find container \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": container with ID starting with 8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.925206 4812 scope.go:117] "RemoveContainer" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.925413 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} err="failed to get container status \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": rpc error: code = NotFound desc = could not find container \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": container with ID starting with 93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.925431 4812 scope.go:117] "RemoveContainer" containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.926376 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"} err="failed to get container status \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": rpc error: code = NotFound desc = could not find container \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": container with ID starting with a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740 not found: ID does not exist" Nov 
25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.926398 4812 scope.go:117] "RemoveContainer" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.927549 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} err="failed to get container status \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": rpc error: code = NotFound desc = could not find container \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": container with ID starting with dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.927570 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.927994 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} err="failed to get container status \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": rpc error: code = NotFound desc = could not find container \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": container with ID starting with 94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.928015 4812 scope.go:117] "RemoveContainer" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.928420 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} err="failed to get container status \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": rpc error: code = NotFound desc = could not find container \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": container with ID starting with e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6 not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.928438 4812 scope.go:117] "RemoveContainer" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.928708 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} err="failed to get container status \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": rpc error: code = NotFound desc = could not find container \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": container with ID starting with c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.928733 4812 scope.go:117] "RemoveContainer" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.929091 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} err="failed to get container status 
\"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": rpc error: code = NotFound desc = could not find container \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": container with ID starting with 37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.929115 4812 scope.go:117] "RemoveContainer" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.929385 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} err="failed to get container status \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": rpc error: code = NotFound desc = could not find container \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": container with ID starting with ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.929409 4812 scope.go:117] "RemoveContainer" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.929719 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} err="failed to get container status \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": rpc error: code = NotFound desc = could not find container \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": container with ID starting with 6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.929743 4812 scope.go:117] "RemoveContainer" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.931214 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} err="failed to get container status \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": rpc error: code = NotFound desc = could not find container \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": container with ID starting with 8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.931245 4812 scope.go:117] "RemoveContainer" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.931694 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} err="failed to get container status \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": rpc error: code = NotFound desc = could not find container \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": container with ID starting with 93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.931722 4812 scope.go:117] "RemoveContainer" 
containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.932025 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"} err="failed to get container status \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": rpc error: code = NotFound desc = could not find container \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": container with ID starting with a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740 not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.932051 4812 scope.go:117] "RemoveContainer" containerID="dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.932653 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d"} err="failed to get container status \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": rpc error: code = NotFound desc = could not find container \"dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d\": container with ID starting with dc30d632bc69bd00f77037cefacfbcf46c964ebbc8e8d8c286b86f80b420782d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.932710 4812 scope.go:117] "RemoveContainer" containerID="94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.932949 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a"} err="failed to get container status \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": rpc error: code = NotFound desc = could not find container \"94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a\": container with ID starting with 94ef27f4d669f937266aa5ff0bb9a89f6addf88810e4cb71e5529af9e8dcbf3a not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.932969 4812 scope.go:117] "RemoveContainer" containerID="e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.933314 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6"} err="failed to get container status \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": rpc error: code = NotFound desc = could not find container \"e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6\": container with ID starting with e32184cabb1e4fa664bd59059b7dc67fad78a564dca4eb0fad5afa37f32137e6 not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.933356 4812 scope.go:117] "RemoveContainer" containerID="c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.933662 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f"} err="failed to get container status \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": rpc error: code = NotFound desc = could not find 
container \"c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f\": container with ID starting with c7f86f73554807435cdb71ea731f6a5c5d5e4aa81c1224cfbe2c4c9a0949029f not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.933712 4812 scope.go:117] "RemoveContainer" containerID="37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934019 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab"} err="failed to get container status \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": rpc error: code = NotFound desc = could not find container \"37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab\": container with ID starting with 37bd7a50ef7afdc0d0ac08348dfc3f38435e009d5b37070a1f64711e812d3aab not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934043 4812 scope.go:117] "RemoveContainer" containerID="ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934284 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d"} err="failed to get container status \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": rpc error: code = NotFound desc = could not find container \"ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d\": container with ID starting with ca5535ea56ee12a5543d09a367f653f99954b8fe9be956aeedc3291a623c5b0d not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934314 4812 scope.go:117] "RemoveContainer" containerID="6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934579 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa"} err="failed to get container status \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": rpc error: code = NotFound desc = could not find container \"6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa\": container with ID starting with 6bfecb63d714aaee1842cc0f3be574c7b346b1ff08d85bf7f0548b5e4faee4fa not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934606 4812 scope.go:117] "RemoveContainer" containerID="8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934874 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd"} err="failed to get container status \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": rpc error: code = NotFound desc = could not find container \"8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd\": container with ID starting with 8c7a0c7604c60aad6083da60f8ea48ed8d7018d5939b296c0b51afe439be89bd not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.934962 4812 scope.go:117] "RemoveContainer" containerID="93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.935403 4812 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e"} err="failed to get container status \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": rpc error: code = NotFound desc = could not find container \"93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e\": container with ID starting with 93e77bbff4a61b9bcb73c48cc8179b7e4d6ce5301ebea0fe2b0eb81ae0e25c8e not found: ID does not exist" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.935431 4812 scope.go:117] "RemoveContainer" containerID="a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740" Nov 25 16:57:35 crc kubenswrapper[4812]: I1125 16:57:35.935710 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740"} err="failed to get container status \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": rpc error: code = NotFound desc = could not find container \"a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740\": container with ID starting with a6e686478d2047341e3705eb55d05c2ce02e8d6136dfa6c2fd4ea2166b839740 not found: ID does not exist" Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.674985 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"fe177103f70899a8009723f1a570b67c36f92532b1a3480fa144506083d71f24"} Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.675313 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"d693deb647db2d79d52f4a3bc3eb35f4cb7e50f3623cf41d34907862700d2dbb"} Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.675325 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"a9a8c630bd362b5c2b7a87ca5984c061a573b42aeffaf5eabef69f8fe620bce9"} Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.675335 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"8e5d6b05b914a00da5940f65f247f6ea9e1b4b4695f0bd391610ac937593b21b"} Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.675344 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"85db14ff3371199bc562f9ea3819887be00ae00a435ce1b5959e5c3b3680baea"} Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.675353 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"f7becf04096de33882b60cdff578cd210e6f4fe3383bc4b37ad9c423550cf5b8"} Nov 25 16:57:36 crc kubenswrapper[4812]: I1125 16:57:36.676472 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/2.log" Nov 25 16:57:38 crc kubenswrapper[4812]: I1125 16:57:38.690304 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"651928bb7b10a874cfae430bb94f091610547c67ba6c59847de8db5a136a436d"} Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.709598 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" event={"ID":"7ac84eb6-4c92-4701-81d1-babf3286e867","Type":"ContainerStarted","Data":"4b5fc880cb7eba493f3ccc92e17d29d879667ffb5648bc22e63c5ba4b920b69d"} Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.710065 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.710091 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.710106 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.733073 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.735076 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:57:41 crc kubenswrapper[4812]: I1125 16:57:41.738318 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" podStartSLOduration=6.738300324 podStartE2EDuration="6.738300324s" podCreationTimestamp="2025-11-25 16:57:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:57:41.734281743 +0000 UTC m=+636.574423838" watchObservedRunningTime="2025-11-25 16:57:41.738300324 +0000 UTC m=+636.578442419" Nov 25 16:57:50 crc kubenswrapper[4812]: I1125 16:57:50.831335 4812 scope.go:117] "RemoveContainer" containerID="7b60d73868c8041ba2714a8ca55f1c992e9dc9254dd14cf1495b4db1e4dad249" Nov 25 16:57:50 crc kubenswrapper[4812]: E1125 16:57:50.832079 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-m7ndd_openshift-multus(3a156756-3629-4bed-8de0-1019226b7f04)\"" pod="openshift-multus/multus-m7ndd" podUID="3a156756-3629-4bed-8de0-1019226b7f04" Nov 25 16:58:03 crc kubenswrapper[4812]: I1125 16:58:03.831340 4812 scope.go:117] "RemoveContainer" containerID="7b60d73868c8041ba2714a8ca55f1c992e9dc9254dd14cf1495b4db1e4dad249" Nov 25 16:58:04 crc kubenswrapper[4812]: I1125 16:58:04.833505 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m7ndd_3a156756-3629-4bed-8de0-1019226b7f04/kube-multus/2.log" Nov 25 16:58:04 crc kubenswrapper[4812]: I1125 16:58:04.834224 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m7ndd" event={"ID":"3a156756-3629-4bed-8de0-1019226b7f04","Type":"ContainerStarted","Data":"63a0848072fd33b488c9e218f16de473b6a790f7fd890f1e20daa11a545808b7"} Nov 25 16:58:05 crc kubenswrapper[4812]: I1125 16:58:05.437804 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wxgkb" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.397590 4812 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls"] Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.399136 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.404157 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.407727 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls"] Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.454039 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtsrt\" (UniqueName: \"kubernetes.io/projected/c92d5951-593e-4757-a7f6-aa46198abc4b-kube-api-access-qtsrt\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.454311 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.454369 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.555440 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtsrt\" (UniqueName: \"kubernetes.io/projected/c92d5951-593e-4757-a7f6-aa46198abc4b-kube-api-access-qtsrt\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.555504 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.555565 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.556118 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.556248 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.573954 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtsrt\" (UniqueName: \"kubernetes.io/projected/c92d5951-593e-4757-a7f6-aa46198abc4b-kube-api-access-qtsrt\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.714687 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:13 crc kubenswrapper[4812]: I1125 16:58:13.897483 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls"] Nov 25 16:58:14 crc kubenswrapper[4812]: I1125 16:58:14.881187 4812 generic.go:334] "Generic (PLEG): container finished" podID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerID="600c727103db703c2d8bbe8d3d9aa5b5f4a8f81a7c1e7251ffe233cd787e28d7" exitCode=0 Nov 25 16:58:14 crc kubenswrapper[4812]: I1125 16:58:14.881252 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" event={"ID":"c92d5951-593e-4757-a7f6-aa46198abc4b","Type":"ContainerDied","Data":"600c727103db703c2d8bbe8d3d9aa5b5f4a8f81a7c1e7251ffe233cd787e28d7"} Nov 25 16:58:14 crc kubenswrapper[4812]: I1125 16:58:14.881280 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" event={"ID":"c92d5951-593e-4757-a7f6-aa46198abc4b","Type":"ContainerStarted","Data":"5009132fa058dc096155bf8026056ff132048c9bfe70682fed1d93d7a45cb821"} Nov 25 16:58:16 crc kubenswrapper[4812]: I1125 16:58:16.894857 4812 generic.go:334] "Generic (PLEG): container finished" podID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerID="291200182b1286635ebaa37bbe373945d7b9178199e17a240c8a80b5dcf65bef" exitCode=0 Nov 25 16:58:16 crc kubenswrapper[4812]: I1125 16:58:16.895307 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" event={"ID":"c92d5951-593e-4757-a7f6-aa46198abc4b","Type":"ContainerDied","Data":"291200182b1286635ebaa37bbe373945d7b9178199e17a240c8a80b5dcf65bef"} Nov 25 16:58:18 crc kubenswrapper[4812]: I1125 
16:58:18.010983 4812 generic.go:334] "Generic (PLEG): container finished" podID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerID="5a1bd15eb181e554d0c9208b8a1093fb2f95db14a871d26c3e15bd3a4b5a93af" exitCode=0 Nov 25 16:58:18 crc kubenswrapper[4812]: I1125 16:58:18.011240 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" event={"ID":"c92d5951-593e-4757-a7f6-aa46198abc4b","Type":"ContainerDied","Data":"5a1bd15eb181e554d0c9208b8a1093fb2f95db14a871d26c3e15bd3a4b5a93af"} Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.303825 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.436195 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtsrt\" (UniqueName: \"kubernetes.io/projected/c92d5951-593e-4757-a7f6-aa46198abc4b-kube-api-access-qtsrt\") pod \"c92d5951-593e-4757-a7f6-aa46198abc4b\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.436261 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-util\") pod \"c92d5951-593e-4757-a7f6-aa46198abc4b\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.436354 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-bundle\") pod \"c92d5951-593e-4757-a7f6-aa46198abc4b\" (UID: \"c92d5951-593e-4757-a7f6-aa46198abc4b\") " Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.437806 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-bundle" (OuterVolumeSpecName: "bundle") pod "c92d5951-593e-4757-a7f6-aa46198abc4b" (UID: "c92d5951-593e-4757-a7f6-aa46198abc4b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.442053 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c92d5951-593e-4757-a7f6-aa46198abc4b-kube-api-access-qtsrt" (OuterVolumeSpecName: "kube-api-access-qtsrt") pod "c92d5951-593e-4757-a7f6-aa46198abc4b" (UID: "c92d5951-593e-4757-a7f6-aa46198abc4b"). InnerVolumeSpecName "kube-api-access-qtsrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.461679 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-util" (OuterVolumeSpecName: "util") pod "c92d5951-593e-4757-a7f6-aa46198abc4b" (UID: "c92d5951-593e-4757-a7f6-aa46198abc4b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.538719 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtsrt\" (UniqueName: \"kubernetes.io/projected/c92d5951-593e-4757-a7f6-aa46198abc4b-kube-api-access-qtsrt\") on node \"crc\" DevicePath \"\"" Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.538776 4812 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-util\") on node \"crc\" DevicePath \"\"" Nov 25 16:58:19 crc kubenswrapper[4812]: I1125 16:58:19.538796 4812 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c92d5951-593e-4757-a7f6-aa46198abc4b-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:58:20 crc kubenswrapper[4812]: I1125 16:58:20.026889 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" event={"ID":"c92d5951-593e-4757-a7f6-aa46198abc4b","Type":"ContainerDied","Data":"5009132fa058dc096155bf8026056ff132048c9bfe70682fed1d93d7a45cb821"} Nov 25 16:58:20 crc kubenswrapper[4812]: I1125 16:58:20.027262 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5009132fa058dc096155bf8026056ff132048c9bfe70682fed1d93d7a45cb821" Nov 25 16:58:20 crc kubenswrapper[4812]: I1125 16:58:20.026982 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emcsls" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.223014 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-s8s7n"] Nov 25 16:58:22 crc kubenswrapper[4812]: E1125 16:58:22.223281 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="pull" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.223295 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="pull" Nov 25 16:58:22 crc kubenswrapper[4812]: E1125 16:58:22.223316 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="util" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.223323 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="util" Nov 25 16:58:22 crc kubenswrapper[4812]: E1125 16:58:22.223336 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="extract" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.223344 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="extract" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.223453 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c92d5951-593e-4757-a7f6-aa46198abc4b" containerName="extract" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.223961 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.226400 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.226713 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.236773 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-s8s7n"] Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.240615 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8wt7c" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.277040 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98bc7\" (UniqueName: \"kubernetes.io/projected/8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7-kube-api-access-98bc7\") pod \"nmstate-operator-557fdffb88-s8s7n\" (UID: \"8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.378491 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98bc7\" (UniqueName: \"kubernetes.io/projected/8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7-kube-api-access-98bc7\") pod \"nmstate-operator-557fdffb88-s8s7n\" (UID: \"8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.400554 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98bc7\" (UniqueName: \"kubernetes.io/projected/8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7-kube-api-access-98bc7\") pod \"nmstate-operator-557fdffb88-s8s7n\" (UID: \"8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.539160 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" Nov 25 16:58:22 crc kubenswrapper[4812]: I1125 16:58:22.712402 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-s8s7n"] Nov 25 16:58:23 crc kubenswrapper[4812]: I1125 16:58:23.043834 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" event={"ID":"8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7","Type":"ContainerStarted","Data":"0233abbd2db7d53a1c8ed8bb966b27d8831eb4d5c0502459170a9bfb5ff5af7b"} Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.057002 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" event={"ID":"8e0b6638-e56a-4ab9-93e0-85ec4e6f9da7","Type":"ContainerStarted","Data":"a94f8dd57107a11b62b8be01e54b1695814a7ea4949e5260357e2fdaa16d7605"} Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.072866 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-s8s7n" podStartSLOduration=1.216664503 podStartE2EDuration="3.072840537s" podCreationTimestamp="2025-11-25 16:58:22 +0000 UTC" firstStartedPulling="2025-11-25 16:58:22.7223353 +0000 UTC m=+677.562477395" lastFinishedPulling="2025-11-25 16:58:24.578511304 +0000 UTC m=+679.418653429" observedRunningTime="2025-11-25 16:58:25.069693047 +0000 UTC m=+679.909835152" watchObservedRunningTime="2025-11-25 16:58:25.072840537 +0000 UTC m=+679.912982632" Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.881253 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh"] Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.883017 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.888148 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-2vmzh" Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.901371 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht"] Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.902264 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.906456 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.909302 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-h42z9"] Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.910223 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.920579 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh"] Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.932589 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht"] Nov 25 16:58:25 crc kubenswrapper[4812]: I1125 16:58:25.947147 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52bw2\" (UniqueName: \"kubernetes.io/projected/098fce70-9b90-4d5b-8105-b60be0cdadef-kube-api-access-52bw2\") pod \"nmstate-metrics-5dcf9c57c5-hbrxh\" (UID: \"098fce70-9b90-4d5b-8105-b60be0cdadef\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.006380 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf"] Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.007144 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.009078 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-v2wvg" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.009425 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.009785 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.015573 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf"] Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048116 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52bw2\" (UniqueName: \"kubernetes.io/projected/098fce70-9b90-4d5b-8105-b60be0cdadef-kube-api-access-52bw2\") pod \"nmstate-metrics-5dcf9c57c5-hbrxh\" (UID: \"098fce70-9b90-4d5b-8105-b60be0cdadef\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048187 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6a64a231-50f8-4025-811f-0c0b1941c17e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-q7qht\" (UID: \"6a64a231-50f8-4025-811f-0c0b1941c17e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048215 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-ovs-socket\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048251 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-nmstate-lock\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " 
pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048275 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-dbus-socket\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048356 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgjhs\" (UniqueName: \"kubernetes.io/projected/6a64a231-50f8-4025-811f-0c0b1941c17e-kube-api-access-zgjhs\") pod \"nmstate-webhook-6b89b748d8-q7qht\" (UID: \"6a64a231-50f8-4025-811f-0c0b1941c17e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.048408 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m7hq\" (UniqueName: \"kubernetes.io/projected/8af96a4a-80d0-4e12-8050-8a0290dd5123-kube-api-access-6m7hq\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.079903 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52bw2\" (UniqueName: \"kubernetes.io/projected/098fce70-9b90-4d5b-8105-b60be0cdadef-kube-api-access-52bw2\") pod \"nmstate-metrics-5dcf9c57c5-hbrxh\" (UID: \"098fce70-9b90-4d5b-8105-b60be0cdadef\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150068 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6a64a231-50f8-4025-811f-0c0b1941c17e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-q7qht\" (UID: \"6a64a231-50f8-4025-811f-0c0b1941c17e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150332 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5mdg\" (UniqueName: \"kubernetes.io/projected/9b1843f8-233e-4a9a-b135-8a0d977ffea8-kube-api-access-x5mdg\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150480 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-ovs-socket\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150603 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9b1843f8-233e-4a9a-b135-8a0d977ffea8-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150728 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: 
\"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-nmstate-lock\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150799 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-nmstate-lock\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150917 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-dbus-socket\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.150610 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-ovs-socket\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.151137 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgjhs\" (UniqueName: \"kubernetes.io/projected/6a64a231-50f8-4025-811f-0c0b1941c17e-kube-api-access-zgjhs\") pod \"nmstate-webhook-6b89b748d8-q7qht\" (UID: \"6a64a231-50f8-4025-811f-0c0b1941c17e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.151238 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b1843f8-233e-4a9a-b135-8a0d977ffea8-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.151337 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m7hq\" (UniqueName: \"kubernetes.io/projected/8af96a4a-80d0-4e12-8050-8a0290dd5123-kube-api-access-6m7hq\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.151353 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/8af96a4a-80d0-4e12-8050-8a0290dd5123-dbus-socket\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.169401 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m7hq\" (UniqueName: \"kubernetes.io/projected/8af96a4a-80d0-4e12-8050-8a0290dd5123-kube-api-access-6m7hq\") pod \"nmstate-handler-h42z9\" (UID: \"8af96a4a-80d0-4e12-8050-8a0290dd5123\") " pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.170164 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgjhs\" (UniqueName: 
\"kubernetes.io/projected/6a64a231-50f8-4025-811f-0c0b1941c17e-kube-api-access-zgjhs\") pod \"nmstate-webhook-6b89b748d8-q7qht\" (UID: \"6a64a231-50f8-4025-811f-0c0b1941c17e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.174795 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6a64a231-50f8-4025-811f-0c0b1941c17e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-q7qht\" (UID: \"6a64a231-50f8-4025-811f-0c0b1941c17e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.201857 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7fbcd55fbb-dr9jj"] Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.202524 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.217901 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.223287 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7fbcd55fbb-dr9jj"] Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.234911 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.252172 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.252768 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5mdg\" (UniqueName: \"kubernetes.io/projected/9b1843f8-233e-4a9a-b135-8a0d977ffea8-kube-api-access-x5mdg\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.254240 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9b1843f8-233e-4a9a-b135-8a0d977ffea8-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.254409 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b1843f8-233e-4a9a-b135-8a0d977ffea8-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: E1125 16:58:26.254621 4812 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 16:58:26 crc kubenswrapper[4812]: E1125 16:58:26.254752 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9b1843f8-233e-4a9a-b135-8a0d977ffea8-plugin-serving-cert podName:9b1843f8-233e-4a9a-b135-8a0d977ffea8 nodeName:}" failed. 
No retries permitted until 2025-11-25 16:58:26.754724098 +0000 UTC m=+681.594866193 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/9b1843f8-233e-4a9a-b135-8a0d977ffea8-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-2kzbf" (UID: "9b1843f8-233e-4a9a-b135-8a0d977ffea8") : secret "plugin-serving-cert" not found Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.255918 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9b1843f8-233e-4a9a-b135-8a0d977ffea8-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.274581 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5mdg\" (UniqueName: \"kubernetes.io/projected/9b1843f8-233e-4a9a-b135-8a0d977ffea8-kube-api-access-x5mdg\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: W1125 16:58:26.275812 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8af96a4a_80d0_4e12_8050_8a0290dd5123.slice/crio-945121eddc7dd079e65a60fa69351b21a8be1fac60ca86cdc01a05d70ab9fc72 WatchSource:0}: Error finding container 945121eddc7dd079e65a60fa69351b21a8be1fac60ca86cdc01a05d70ab9fc72: Status 404 returned error can't find the container with id 945121eddc7dd079e65a60fa69351b21a8be1fac60ca86cdc01a05d70ab9fc72 Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.355995 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-service-ca\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.356051 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-oauth-serving-cert\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.356075 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-config\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.356123 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-serving-cert\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.356145 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blphp\" (UniqueName: \"kubernetes.io/projected/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-kube-api-access-blphp\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.356162 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-trusted-ca-bundle\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.356177 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-oauth-config\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.415930 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh"] Nov 25 16:58:26 crc kubenswrapper[4812]: W1125 16:58:26.426340 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod098fce70_9b90_4d5b_8105_b60be0cdadef.slice/crio-a87aec36cba1b5521a72a4226ee6536872998f6f3c39e1d86edc35e21ad082a8 WatchSource:0}: Error finding container a87aec36cba1b5521a72a4226ee6536872998f6f3c39e1d86edc35e21ad082a8: Status 404 returned error can't find the container with id a87aec36cba1b5521a72a4226ee6536872998f6f3c39e1d86edc35e21ad082a8 Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458114 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht"] Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458685 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-serving-cert\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458760 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blphp\" (UniqueName: \"kubernetes.io/projected/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-kube-api-access-blphp\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458794 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-trusted-ca-bundle\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458820 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-oauth-config\") pod \"console-7fbcd55fbb-dr9jj\" (UID: 
\"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458872 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-service-ca\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458922 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-oauth-serving-cert\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.458952 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-config\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.460106 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-trusted-ca-bundle\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.460572 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-oauth-serving-cert\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.460795 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-config\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.461388 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-service-ca\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: W1125 16:58:26.462796 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a64a231_50f8_4025_811f_0c0b1941c17e.slice/crio-38d5b32ae7bd2f283f64a77393b53bb32062640511022fdc1e6b45f5d6684603 WatchSource:0}: Error finding container 38d5b32ae7bd2f283f64a77393b53bb32062640511022fdc1e6b45f5d6684603: Status 404 returned error can't find the container with id 38d5b32ae7bd2f283f64a77393b53bb32062640511022fdc1e6b45f5d6684603 Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.463180 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-serving-cert\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.464303 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-console-oauth-config\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.475474 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blphp\" (UniqueName: \"kubernetes.io/projected/9dd92b16-fe36-4ce9-8e7d-318737f5fc90-kube-api-access-blphp\") pod \"console-7fbcd55fbb-dr9jj\" (UID: \"9dd92b16-fe36-4ce9-8e7d-318737f5fc90\") " pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.524817 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.688893 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7fbcd55fbb-dr9jj"] Nov 25 16:58:26 crc kubenswrapper[4812]: W1125 16:58:26.695187 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dd92b16_fe36_4ce9_8e7d_318737f5fc90.slice/crio-6cde5fa008b38dd65bf4e7eceab18736f64c86acb78e1c8b51a69dbca7440878 WatchSource:0}: Error finding container 6cde5fa008b38dd65bf4e7eceab18736f64c86acb78e1c8b51a69dbca7440878: Status 404 returned error can't find the container with id 6cde5fa008b38dd65bf4e7eceab18736f64c86acb78e1c8b51a69dbca7440878 Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.763013 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b1843f8-233e-4a9a-b135-8a0d977ffea8-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.767477 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b1843f8-233e-4a9a-b135-8a0d977ffea8-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-2kzbf\" (UID: \"9b1843f8-233e-4a9a-b135-8a0d977ffea8\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:26 crc kubenswrapper[4812]: I1125 16:58:26.931054 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.068934 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" event={"ID":"098fce70-9b90-4d5b-8105-b60be0cdadef","Type":"ContainerStarted","Data":"a87aec36cba1b5521a72a4226ee6536872998f6f3c39e1d86edc35e21ad082a8"} Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.070796 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-h42z9" event={"ID":"8af96a4a-80d0-4e12-8050-8a0290dd5123","Type":"ContainerStarted","Data":"945121eddc7dd079e65a60fa69351b21a8be1fac60ca86cdc01a05d70ab9fc72"} Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.072508 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7fbcd55fbb-dr9jj" event={"ID":"9dd92b16-fe36-4ce9-8e7d-318737f5fc90","Type":"ContainerStarted","Data":"108f22a1d6f62f1a188de8ffbb8c8ca3f858d3c1a18915822f1d694d04b23dfb"} Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.072554 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7fbcd55fbb-dr9jj" event={"ID":"9dd92b16-fe36-4ce9-8e7d-318737f5fc90","Type":"ContainerStarted","Data":"6cde5fa008b38dd65bf4e7eceab18736f64c86acb78e1c8b51a69dbca7440878"} Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.075702 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" event={"ID":"6a64a231-50f8-4025-811f-0c0b1941c17e","Type":"ContainerStarted","Data":"38d5b32ae7bd2f283f64a77393b53bb32062640511022fdc1e6b45f5d6684603"} Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.094631 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7fbcd55fbb-dr9jj" podStartSLOduration=1.094600073 podStartE2EDuration="1.094600073s" podCreationTimestamp="2025-11-25 16:58:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:58:27.089662379 +0000 UTC m=+681.929804474" watchObservedRunningTime="2025-11-25 16:58:27.094600073 +0000 UTC m=+681.934742168" Nov 25 16:58:27 crc kubenswrapper[4812]: I1125 16:58:27.099617 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf"] Nov 25 16:58:28 crc kubenswrapper[4812]: I1125 16:58:28.082416 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" event={"ID":"9b1843f8-233e-4a9a-b135-8a0d977ffea8","Type":"ContainerStarted","Data":"30f23433f58db70f430113e308a84e48ff1d0c03cee5ac86d214c92a3198e309"} Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.089791 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-h42z9" event={"ID":"8af96a4a-80d0-4e12-8050-8a0290dd5123","Type":"ContainerStarted","Data":"13e63e64be77583f78a79368ebbcb13c9f0c99062d7f9dd3c9a621c2c7bdde14"} Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.090428 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.091673 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" 
event={"ID":"6a64a231-50f8-4025-811f-0c0b1941c17e","Type":"ContainerStarted","Data":"3f9502091a8612677ad32cdda71952d5a1a6dedd041434bd8184bd7c652176e6"} Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.091885 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.094707 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" event={"ID":"098fce70-9b90-4d5b-8105-b60be0cdadef","Type":"ContainerStarted","Data":"0fbf6ba97cbd35d48d2c5b8913bb74714d8be9e82579b13f77aad2b1406fc0ad"} Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.105213 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-h42z9" podStartSLOduration=1.764352813 podStartE2EDuration="4.105168177s" podCreationTimestamp="2025-11-25 16:58:25 +0000 UTC" firstStartedPulling="2025-11-25 16:58:26.279484889 +0000 UTC m=+681.119626984" lastFinishedPulling="2025-11-25 16:58:28.620300253 +0000 UTC m=+683.460442348" observedRunningTime="2025-11-25 16:58:29.103144926 +0000 UTC m=+683.943287021" watchObservedRunningTime="2025-11-25 16:58:29.105168177 +0000 UTC m=+683.945310272" Nov 25 16:58:29 crc kubenswrapper[4812]: I1125 16:58:29.124930 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" podStartSLOduration=1.843836501 podStartE2EDuration="4.124893223s" podCreationTimestamp="2025-11-25 16:58:25 +0000 UTC" firstStartedPulling="2025-11-25 16:58:26.464818497 +0000 UTC m=+681.304960592" lastFinishedPulling="2025-11-25 16:58:28.745875219 +0000 UTC m=+683.586017314" observedRunningTime="2025-11-25 16:58:29.121027226 +0000 UTC m=+683.961169331" watchObservedRunningTime="2025-11-25 16:58:29.124893223 +0000 UTC m=+683.965035318" Nov 25 16:58:30 crc kubenswrapper[4812]: I1125 16:58:30.103004 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" event={"ID":"9b1843f8-233e-4a9a-b135-8a0d977ffea8","Type":"ContainerStarted","Data":"abb087a961135ce2aee598d9987d67b44a8f0f56bb39cf5179a05e4b98cce3cc"} Nov 25 16:58:30 crc kubenswrapper[4812]: I1125 16:58:30.123604 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-2kzbf" podStartSLOduration=2.64042144 podStartE2EDuration="5.12357652s" podCreationTimestamp="2025-11-25 16:58:25 +0000 UTC" firstStartedPulling="2025-11-25 16:58:27.118412382 +0000 UTC m=+681.958554477" lastFinishedPulling="2025-11-25 16:58:29.601567462 +0000 UTC m=+684.441709557" observedRunningTime="2025-11-25 16:58:30.122981354 +0000 UTC m=+684.963123489" watchObservedRunningTime="2025-11-25 16:58:30.12357652 +0000 UTC m=+684.963718635" Nov 25 16:58:32 crc kubenswrapper[4812]: I1125 16:58:32.122045 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" event={"ID":"098fce70-9b90-4d5b-8105-b60be0cdadef","Type":"ContainerStarted","Data":"8706d4fc2f1bac35156ef183f1c0f22c39efaf4eaa475603aaec1e84199d0b7f"} Nov 25 16:58:32 crc kubenswrapper[4812]: I1125 16:58:32.139829 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-hbrxh" podStartSLOduration=2.5546952960000002 podStartE2EDuration="7.139800928s" podCreationTimestamp="2025-11-25 16:58:25 +0000 UTC" 
firstStartedPulling="2025-11-25 16:58:26.428057473 +0000 UTC m=+681.268199568" lastFinishedPulling="2025-11-25 16:58:31.013163105 +0000 UTC m=+685.853305200" observedRunningTime="2025-11-25 16:58:32.136149676 +0000 UTC m=+686.976291791" watchObservedRunningTime="2025-11-25 16:58:32.139800928 +0000 UTC m=+686.979943023" Nov 25 16:58:36 crc kubenswrapper[4812]: I1125 16:58:36.283554 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-h42z9" Nov 25 16:58:36 crc kubenswrapper[4812]: I1125 16:58:36.526337 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:36 crc kubenswrapper[4812]: I1125 16:58:36.526886 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:36 crc kubenswrapper[4812]: I1125 16:58:36.533692 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:37 crc kubenswrapper[4812]: I1125 16:58:37.158906 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7fbcd55fbb-dr9jj" Nov 25 16:58:37 crc kubenswrapper[4812]: I1125 16:58:37.218923 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-nj6w8"] Nov 25 16:58:46 crc kubenswrapper[4812]: I1125 16:58:46.242147 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-q7qht" Nov 25 16:58:58 crc kubenswrapper[4812]: I1125 16:58:58.846451 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd"] Nov 25 16:58:58 crc kubenswrapper[4812]: I1125 16:58:58.848375 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:58 crc kubenswrapper[4812]: I1125 16:58:58.850428 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 16:58:58 crc kubenswrapper[4812]: I1125 16:58:58.857231 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd"] Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.005194 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xlnn\" (UniqueName: \"kubernetes.io/projected/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-kube-api-access-8xlnn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.005266 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.005291 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.106688 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xlnn\" (UniqueName: \"kubernetes.io/projected/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-kube-api-access-8xlnn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.106790 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.106829 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.107398 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.107487 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.125218 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xlnn\" (UniqueName: \"kubernetes.io/projected/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-kube-api-access-8xlnn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.168642 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:58:59 crc kubenswrapper[4812]: I1125 16:58:59.378907 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd"] Nov 25 16:59:00 crc kubenswrapper[4812]: I1125 16:59:00.287144 4812 generic.go:334] "Generic (PLEG): container finished" podID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerID="e99bb267b8a03b4f793a99588b04a372784cd5f67a0f98ba2614a364bf804222" exitCode=0 Nov 25 16:59:00 crc kubenswrapper[4812]: I1125 16:59:00.287219 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" event={"ID":"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4","Type":"ContainerDied","Data":"e99bb267b8a03b4f793a99588b04a372784cd5f67a0f98ba2614a364bf804222"} Nov 25 16:59:00 crc kubenswrapper[4812]: I1125 16:59:00.287271 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" event={"ID":"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4","Type":"ContainerStarted","Data":"4c9830b90cacca4b6d746b7b030e998d027bfd5dd585121f2c8cddadf39e30e2"} Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.278438 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-nj6w8" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerName="console" containerID="cri-o://5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67" gracePeriod=15 Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.300923 4812 generic.go:334] "Generic (PLEG): container finished" podID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerID="daee6b1561e694d0afecf94a673c3588a81a4b0b8dd09dddf0f1fd3c26ecdec7" exitCode=0 Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.300976 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" 
event={"ID":"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4","Type":"ContainerDied","Data":"daee6b1561e694d0afecf94a673c3588a81a4b0b8dd09dddf0f1fd3c26ecdec7"} Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.679683 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-nj6w8_f139c50a-da2a-4407-a9dd-ccaabc7e5dcf/console/0.log" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.680040 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754148 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-config\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754206 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5gk8\" (UniqueName: \"kubernetes.io/projected/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-kube-api-access-r5gk8\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754306 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-service-ca\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754342 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-oauth-config\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754379 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-trusted-ca-bundle\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754430 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-oauth-serving-cert\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.754481 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-serving-cert\") pod \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\" (UID: \"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf\") " Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.755180 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-service-ca" (OuterVolumeSpecName: "service-ca") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.755190 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.755223 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-config" (OuterVolumeSpecName: "console-config") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.755779 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.759630 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.760308 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-kube-api-access-r5gk8" (OuterVolumeSpecName: "kube-api-access-r5gk8") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "kube-api-access-r5gk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.760357 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" (UID: "f139c50a-da2a-4407-a9dd-ccaabc7e5dcf"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856211 4812 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856275 4812 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856293 4812 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856303 4812 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856311 4812 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856319 4812 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:02 crc kubenswrapper[4812]: I1125 16:59:02.856328 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5gk8\" (UniqueName: \"kubernetes.io/projected/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf-kube-api-access-r5gk8\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.311136 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-nj6w8_f139c50a-da2a-4407-a9dd-ccaabc7e5dcf/console/0.log" Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.311526 4812 generic.go:334] "Generic (PLEG): container finished" podID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerID="5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67" exitCode=2 Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.311667 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-nj6w8" Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.311695 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nj6w8" event={"ID":"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf","Type":"ContainerDied","Data":"5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67"} Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.311745 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nj6w8" event={"ID":"f139c50a-da2a-4407-a9dd-ccaabc7e5dcf","Type":"ContainerDied","Data":"a7c90059d5030c08a459a2626af94154eb40bdafd38b5ca0a74d3a5149585a7b"} Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.311773 4812 scope.go:117] "RemoveContainer" containerID="5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67" Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.315179 4812 generic.go:334] "Generic (PLEG): container finished" podID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerID="ce8f3243c87564af69714136bacdfb12ed8b59ef8f62904703f8ac9c38775c59" exitCode=0 Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.315243 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" event={"ID":"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4","Type":"ContainerDied","Data":"ce8f3243c87564af69714136bacdfb12ed8b59ef8f62904703f8ac9c38775c59"} Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.338918 4812 scope.go:117] "RemoveContainer" containerID="5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67" Nov 25 16:59:03 crc kubenswrapper[4812]: E1125 16:59:03.339865 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67\": container with ID starting with 5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67 not found: ID does not exist" containerID="5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67" Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.339991 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67"} err="failed to get container status \"5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67\": rpc error: code = NotFound desc = could not find container \"5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67\": container with ID starting with 5753762cbf49f0682f0f0a151110b4880f4bae747d2a689a45dccd14bf722c67 not found: ID does not exist" Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.353397 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-nj6w8"] Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.357601 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-nj6w8"] Nov 25 16:59:03 crc kubenswrapper[4812]: I1125 16:59:03.841480 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" path="/var/lib/kubelet/pods/f139c50a-da2a-4407-a9dd-ccaabc7e5dcf/volumes" Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.521688 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.678299 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xlnn\" (UniqueName: \"kubernetes.io/projected/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-kube-api-access-8xlnn\") pod \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.678657 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-bundle\") pod \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.678701 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-util\") pod \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\" (UID: \"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4\") " Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.679823 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-bundle" (OuterVolumeSpecName: "bundle") pod "ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" (UID: "ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.683133 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-kube-api-access-8xlnn" (OuterVolumeSpecName: "kube-api-access-8xlnn") pod "ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" (UID: "ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4"). InnerVolumeSpecName "kube-api-access-8xlnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.780225 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xlnn\" (UniqueName: \"kubernetes.io/projected/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-kube-api-access-8xlnn\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:04 crc kubenswrapper[4812]: I1125 16:59:04.780276 4812 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:05 crc kubenswrapper[4812]: I1125 16:59:05.010089 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-util" (OuterVolumeSpecName: "util") pod "ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" (UID: "ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 16:59:05 crc kubenswrapper[4812]: I1125 16:59:05.084082 4812 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4-util\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:05 crc kubenswrapper[4812]: I1125 16:59:05.331397 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" event={"ID":"ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4","Type":"ContainerDied","Data":"4c9830b90cacca4b6d746b7b030e998d027bfd5dd585121f2c8cddadf39e30e2"} Nov 25 16:59:05 crc kubenswrapper[4812]: I1125 16:59:05.331451 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6z4snd" Nov 25 16:59:05 crc kubenswrapper[4812]: I1125 16:59:05.331457 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c9830b90cacca4b6d746b7b030e998d027bfd5dd585121f2c8cddadf39e30e2" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.135632 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n"] Nov 25 16:59:14 crc kubenswrapper[4812]: E1125 16:59:14.136408 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerName="console" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.136422 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerName="console" Nov 25 16:59:14 crc kubenswrapper[4812]: E1125 16:59:14.136437 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="extract" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.136443 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="extract" Nov 25 16:59:14 crc kubenswrapper[4812]: E1125 16:59:14.136455 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="util" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.136463 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="util" Nov 25 16:59:14 crc kubenswrapper[4812]: E1125 16:59:14.136472 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="pull" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.136477 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="pull" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.136611 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f139c50a-da2a-4407-a9dd-ccaabc7e5dcf" containerName="console" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.136625 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddf3b3cf-2bcd-4f4b-8580-4ae4b2a6c2a4" containerName="extract" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.137109 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.140738 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.140758 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.141340 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-6z7sv" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.141363 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.141552 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.166834 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n"] Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.213515 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/06fdd0d8-45b7-4787-9f77-24f76fccc672-apiservice-cert\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.213631 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tttqz\" (UniqueName: \"kubernetes.io/projected/06fdd0d8-45b7-4787-9f77-24f76fccc672-kube-api-access-tttqz\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.213682 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/06fdd0d8-45b7-4787-9f77-24f76fccc672-webhook-cert\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.315065 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tttqz\" (UniqueName: \"kubernetes.io/projected/06fdd0d8-45b7-4787-9f77-24f76fccc672-kube-api-access-tttqz\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.315152 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/06fdd0d8-45b7-4787-9f77-24f76fccc672-webhook-cert\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.315196 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/06fdd0d8-45b7-4787-9f77-24f76fccc672-apiservice-cert\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.328434 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/06fdd0d8-45b7-4787-9f77-24f76fccc672-apiservice-cert\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.329321 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/06fdd0d8-45b7-4787-9f77-24f76fccc672-webhook-cert\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.334627 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tttqz\" (UniqueName: \"kubernetes.io/projected/06fdd0d8-45b7-4787-9f77-24f76fccc672-kube-api-access-tttqz\") pod \"metallb-operator-controller-manager-6f7b877f74-qcc8n\" (UID: \"06fdd0d8-45b7-4787-9f77-24f76fccc672\") " pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.419166 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z"] Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.420000 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.422709 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.423109 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.423935 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-gnkrs" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.450853 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z"] Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.463200 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.518286 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/acad3da2-9f4b-45bd-bbe1-b8b475089e41-apiservice-cert\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.518388 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/acad3da2-9f4b-45bd-bbe1-b8b475089e41-webhook-cert\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.518443 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhtb7\" (UniqueName: \"kubernetes.io/projected/acad3da2-9f4b-45bd-bbe1-b8b475089e41-kube-api-access-nhtb7\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.623556 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/acad3da2-9f4b-45bd-bbe1-b8b475089e41-webhook-cert\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.623932 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhtb7\" (UniqueName: \"kubernetes.io/projected/acad3da2-9f4b-45bd-bbe1-b8b475089e41-kube-api-access-nhtb7\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.623954 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/acad3da2-9f4b-45bd-bbe1-b8b475089e41-apiservice-cert\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.634431 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/acad3da2-9f4b-45bd-bbe1-b8b475089e41-webhook-cert\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.635222 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/acad3da2-9f4b-45bd-bbe1-b8b475089e41-apiservice-cert\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " 
pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.645438 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhtb7\" (UniqueName: \"kubernetes.io/projected/acad3da2-9f4b-45bd-bbe1-b8b475089e41-kube-api-access-nhtb7\") pod \"metallb-operator-webhook-server-58bc6cd89d-tww4z\" (UID: \"acad3da2-9f4b-45bd-bbe1-b8b475089e41\") " pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.739061 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.913195 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n"] Nov 25 16:59:14 crc kubenswrapper[4812]: W1125 16:59:14.924746 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06fdd0d8_45b7_4787_9f77_24f76fccc672.slice/crio-e70bc00e2f5cd34c3685a11c1ba103a3b6e3e58e51ec7349554836aa88186f68 WatchSource:0}: Error finding container e70bc00e2f5cd34c3685a11c1ba103a3b6e3e58e51ec7349554836aa88186f68: Status 404 returned error can't find the container with id e70bc00e2f5cd34c3685a11c1ba103a3b6e3e58e51ec7349554836aa88186f68 Nov 25 16:59:14 crc kubenswrapper[4812]: I1125 16:59:14.979339 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z"] Nov 25 16:59:14 crc kubenswrapper[4812]: W1125 16:59:14.990182 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podacad3da2_9f4b_45bd_bbe1_b8b475089e41.slice/crio-cc007c74b1e0ec6bfe7368403d552cc9549a48ad4cbc73027318c6becd24eb8a WatchSource:0}: Error finding container cc007c74b1e0ec6bfe7368403d552cc9549a48ad4cbc73027318c6becd24eb8a: Status 404 returned error can't find the container with id cc007c74b1e0ec6bfe7368403d552cc9549a48ad4cbc73027318c6becd24eb8a Nov 25 16:59:15 crc kubenswrapper[4812]: I1125 16:59:15.389815 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" event={"ID":"06fdd0d8-45b7-4787-9f77-24f76fccc672","Type":"ContainerStarted","Data":"e70bc00e2f5cd34c3685a11c1ba103a3b6e3e58e51ec7349554836aa88186f68"} Nov 25 16:59:15 crc kubenswrapper[4812]: I1125 16:59:15.390873 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" event={"ID":"acad3da2-9f4b-45bd-bbe1-b8b475089e41","Type":"ContainerStarted","Data":"cc007c74b1e0ec6bfe7368403d552cc9549a48ad4cbc73027318c6becd24eb8a"} Nov 25 16:59:21 crc kubenswrapper[4812]: I1125 16:59:21.430481 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" event={"ID":"06fdd0d8-45b7-4787-9f77-24f76fccc672","Type":"ContainerStarted","Data":"1da88cc096d8df568c029f8c874429f0a4d07cfab2c6ecf55272cda60aaafc0e"} Nov 25 16:59:21 crc kubenswrapper[4812]: I1125 16:59:21.431130 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:21 crc kubenswrapper[4812]: I1125 16:59:21.432486 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" event={"ID":"acad3da2-9f4b-45bd-bbe1-b8b475089e41","Type":"ContainerStarted","Data":"2f6d4d42d11b5da3e20c79d6eb716fa89236020839b270ff0fb7f48fe5552e1b"} Nov 25 16:59:21 crc kubenswrapper[4812]: I1125 16:59:21.432655 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:21 crc kubenswrapper[4812]: I1125 16:59:21.455059 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" podStartSLOduration=1.351621809 podStartE2EDuration="7.455037142s" podCreationTimestamp="2025-11-25 16:59:14 +0000 UTC" firstStartedPulling="2025-11-25 16:59:14.926874702 +0000 UTC m=+729.767016797" lastFinishedPulling="2025-11-25 16:59:21.030290035 +0000 UTC m=+735.870432130" observedRunningTime="2025-11-25 16:59:21.448898493 +0000 UTC m=+736.289040588" watchObservedRunningTime="2025-11-25 16:59:21.455037142 +0000 UTC m=+736.295179237" Nov 25 16:59:21 crc kubenswrapper[4812]: I1125 16:59:21.471299 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" podStartSLOduration=1.41262691 podStartE2EDuration="7.47128014s" podCreationTimestamp="2025-11-25 16:59:14 +0000 UTC" firstStartedPulling="2025-11-25 16:59:14.994284915 +0000 UTC m=+729.834427010" lastFinishedPulling="2025-11-25 16:59:21.052938145 +0000 UTC m=+735.893080240" observedRunningTime="2025-11-25 16:59:21.469229644 +0000 UTC m=+736.309371759" watchObservedRunningTime="2025-11-25 16:59:21.47128014 +0000 UTC m=+736.311422235" Nov 25 16:59:34 crc kubenswrapper[4812]: I1125 16:59:34.757161 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-58bc6cd89d-tww4z" Nov 25 16:59:34 crc kubenswrapper[4812]: I1125 16:59:34.933971 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9t6bj"] Nov 25 16:59:34 crc kubenswrapper[4812]: I1125 16:59:34.934237 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" podUID="01a09641-0222-4bd8-af33-bf92edcc229c" containerName="controller-manager" containerID="cri-o://061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8" gracePeriod=30 Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.011885 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"] Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.012865 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" podUID="6ae73445-81df-49ec-9c77-da00d65eef40" containerName="route-controller-manager" containerID="cri-o://b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e" gracePeriod=30 Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.304684 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.369708 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.412721 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-client-ca\") pod \"01a09641-0222-4bd8-af33-bf92edcc229c\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.412812 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01a09641-0222-4bd8-af33-bf92edcc229c-serving-cert\") pod \"01a09641-0222-4bd8-af33-bf92edcc229c\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.412849 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m78w5\" (UniqueName: \"kubernetes.io/projected/01a09641-0222-4bd8-af33-bf92edcc229c-kube-api-access-m78w5\") pod \"01a09641-0222-4bd8-af33-bf92edcc229c\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.412899 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-config\") pod \"01a09641-0222-4bd8-af33-bf92edcc229c\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.412959 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-proxy-ca-bundles\") pod \"01a09641-0222-4bd8-af33-bf92edcc229c\" (UID: \"01a09641-0222-4bd8-af33-bf92edcc229c\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.413698 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "01a09641-0222-4bd8-af33-bf92edcc229c" (UID: "01a09641-0222-4bd8-af33-bf92edcc229c"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.413693 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-client-ca" (OuterVolumeSpecName: "client-ca") pod "01a09641-0222-4bd8-af33-bf92edcc229c" (UID: "01a09641-0222-4bd8-af33-bf92edcc229c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.415085 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-config" (OuterVolumeSpecName: "config") pod "01a09641-0222-4bd8-af33-bf92edcc229c" (UID: "01a09641-0222-4bd8-af33-bf92edcc229c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.418505 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01a09641-0222-4bd8-af33-bf92edcc229c-kube-api-access-m78w5" (OuterVolumeSpecName: "kube-api-access-m78w5") pod "01a09641-0222-4bd8-af33-bf92edcc229c" (UID: "01a09641-0222-4bd8-af33-bf92edcc229c"). 
InnerVolumeSpecName "kube-api-access-m78w5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.423451 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01a09641-0222-4bd8-af33-bf92edcc229c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01a09641-0222-4bd8-af33-bf92edcc229c" (UID: "01a09641-0222-4bd8-af33-bf92edcc229c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.508159 4812 generic.go:334] "Generic (PLEG): container finished" podID="6ae73445-81df-49ec-9c77-da00d65eef40" containerID="b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e" exitCode=0 Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.508250 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" event={"ID":"6ae73445-81df-49ec-9c77-da00d65eef40","Type":"ContainerDied","Data":"b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e"} Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.508262 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.508298 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6" event={"ID":"6ae73445-81df-49ec-9c77-da00d65eef40","Type":"ContainerDied","Data":"320971ed4b41217e6abefc5b08531d263297769ef00465c8bcf016826634c53c"} Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.508323 4812 scope.go:117] "RemoveContainer" containerID="b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.510059 4812 generic.go:334] "Generic (PLEG): container finished" podID="01a09641-0222-4bd8-af33-bf92edcc229c" containerID="061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8" exitCode=0 Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.510102 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" event={"ID":"01a09641-0222-4bd8-af33-bf92edcc229c","Type":"ContainerDied","Data":"061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8"} Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.510137 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" event={"ID":"01a09641-0222-4bd8-af33-bf92edcc229c","Type":"ContainerDied","Data":"b1adbbd29ac387a54f69c974adc7eadced01165a776c0aa95551813543eb84b2"} Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.510181 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-9t6bj" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.514408 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-config\") pod \"6ae73445-81df-49ec-9c77-da00d65eef40\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.514446 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ae73445-81df-49ec-9c77-da00d65eef40-serving-cert\") pod \"6ae73445-81df-49ec-9c77-da00d65eef40\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.514484 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b58lk\" (UniqueName: \"kubernetes.io/projected/6ae73445-81df-49ec-9c77-da00d65eef40-kube-api-access-b58lk\") pod \"6ae73445-81df-49ec-9c77-da00d65eef40\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.514675 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-client-ca\") pod \"6ae73445-81df-49ec-9c77-da00d65eef40\" (UID: \"6ae73445-81df-49ec-9c77-da00d65eef40\") " Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.514980 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m78w5\" (UniqueName: \"kubernetes.io/projected/01a09641-0222-4bd8-af33-bf92edcc229c-kube-api-access-m78w5\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.515002 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.515014 4812 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.515025 4812 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/01a09641-0222-4bd8-af33-bf92edcc229c-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.515036 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01a09641-0222-4bd8-af33-bf92edcc229c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.515353 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-config" (OuterVolumeSpecName: "config") pod "6ae73445-81df-49ec-9c77-da00d65eef40" (UID: "6ae73445-81df-49ec-9c77-da00d65eef40"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.515551 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-client-ca" (OuterVolumeSpecName: "client-ca") pod "6ae73445-81df-49ec-9c77-da00d65eef40" (UID: "6ae73445-81df-49ec-9c77-da00d65eef40"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.518443 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ae73445-81df-49ec-9c77-da00d65eef40-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6ae73445-81df-49ec-9c77-da00d65eef40" (UID: "6ae73445-81df-49ec-9c77-da00d65eef40"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.518641 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ae73445-81df-49ec-9c77-da00d65eef40-kube-api-access-b58lk" (OuterVolumeSpecName: "kube-api-access-b58lk") pod "6ae73445-81df-49ec-9c77-da00d65eef40" (UID: "6ae73445-81df-49ec-9c77-da00d65eef40"). InnerVolumeSpecName "kube-api-access-b58lk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.525811 4812 scope.go:117] "RemoveContainer" containerID="b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e" Nov 25 16:59:35 crc kubenswrapper[4812]: E1125 16:59:35.526237 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e\": container with ID starting with b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e not found: ID does not exist" containerID="b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.526275 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e"} err="failed to get container status \"b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e\": rpc error: code = NotFound desc = could not find container \"b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e\": container with ID starting with b0a7f77eeff76ec8f0761685121f56338cb6cced51aba0b4b9af55087de9636e not found: ID does not exist" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.526303 4812 scope.go:117] "RemoveContainer" containerID="061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.539166 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9t6bj"] Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.543967 4812 scope.go:117] "RemoveContainer" containerID="061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8" Nov 25 16:59:35 crc kubenswrapper[4812]: E1125 16:59:35.544752 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8\": container with ID starting with 061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8 not found: ID does not exist" 
containerID="061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.544803 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8"} err="failed to get container status \"061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8\": rpc error: code = NotFound desc = could not find container \"061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8\": container with ID starting with 061d0abb21477d54275876dbf9e552ea7f3b6875e51d7d0a6a848ba6044c20d8 not found: ID does not exist" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.546139 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-9t6bj"] Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.616153 4812 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.616378 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ae73445-81df-49ec-9c77-da00d65eef40-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.616446 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ae73445-81df-49ec-9c77-da00d65eef40-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.616502 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b58lk\" (UniqueName: \"kubernetes.io/projected/6ae73445-81df-49ec-9c77-da00d65eef40-kube-api-access-b58lk\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.841807 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01a09641-0222-4bd8-af33-bf92edcc229c" path="/var/lib/kubelet/pods/01a09641-0222-4bd8-af33-bf92edcc229c/volumes" Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.842312 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"] Nov 25 16:59:35 crc kubenswrapper[4812]: I1125 16:59:35.842342 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dgwd6"] Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.487857 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-564674b98b-762xs"] Nov 25 16:59:36 crc kubenswrapper[4812]: E1125 16:59:36.488760 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ae73445-81df-49ec-9c77-da00d65eef40" containerName="route-controller-manager" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.488849 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ae73445-81df-49ec-9c77-da00d65eef40" containerName="route-controller-manager" Nov 25 16:59:36 crc kubenswrapper[4812]: E1125 16:59:36.488961 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01a09641-0222-4bd8-af33-bf92edcc229c" containerName="controller-manager" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.489033 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="01a09641-0222-4bd8-af33-bf92edcc229c" 
containerName="controller-manager" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.489217 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="01a09641-0222-4bd8-af33-bf92edcc229c" containerName="controller-manager" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.489304 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ae73445-81df-49ec-9c77-da00d65eef40" containerName="route-controller-manager" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.490082 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.492011 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn"] Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.492313 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.492845 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.493149 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.493546 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.493718 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.493761 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.493849 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.495656 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.495836 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.497745 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.497844 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.497992 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.498032 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.510845 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-564674b98b-762xs"] Nov 25 16:59:36 crc 
kubenswrapper[4812]: I1125 16:59:36.519314 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.525146 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn"] Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.599517 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn"] Nov 25 16:59:36 crc kubenswrapper[4812]: E1125 16:59:36.600089 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-mrfbm proxy-ca-bundles serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" podUID="e7e7d120-6b99-4530-8997-2cc482190f83" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.614756 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-564674b98b-762xs"] Nov 25 16:59:36 crc kubenswrapper[4812]: E1125 16:59:36.615218 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-qpwq9 serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" podUID="0c19beb3-c10a-4ba4-b200-d6c5cf407047" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632080 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-proxy-ca-bundles\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632148 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-client-ca\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632176 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-client-ca\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632257 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpwq9\" (UniqueName: \"kubernetes.io/projected/0c19beb3-c10a-4ba4-b200-d6c5cf407047-kube-api-access-qpwq9\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632319 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-config\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632366 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrfbm\" (UniqueName: \"kubernetes.io/projected/e7e7d120-6b99-4530-8997-2cc482190f83-kube-api-access-mrfbm\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632402 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-config\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632628 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c19beb3-c10a-4ba4-b200-d6c5cf407047-serving-cert\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.632762 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e7d120-6b99-4530-8997-2cc482190f83-serving-cert\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734417 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-proxy-ca-bundles\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734468 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-client-ca\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734486 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-client-ca\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734547 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpwq9\" (UniqueName: 
\"kubernetes.io/projected/0c19beb3-c10a-4ba4-b200-d6c5cf407047-kube-api-access-qpwq9\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734582 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-config\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734615 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrfbm\" (UniqueName: \"kubernetes.io/projected/e7e7d120-6b99-4530-8997-2cc482190f83-kube-api-access-mrfbm\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734646 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-config\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734682 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c19beb3-c10a-4ba4-b200-d6c5cf407047-serving-cert\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.734716 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e7d120-6b99-4530-8997-2cc482190f83-serving-cert\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.735624 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-client-ca\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.735740 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-proxy-ca-bundles\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.735858 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-config\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " 
pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.736374 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-config\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.736668 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-client-ca\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.738457 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c19beb3-c10a-4ba4-b200-d6c5cf407047-serving-cert\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.738459 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e7d120-6b99-4530-8997-2cc482190f83-serving-cert\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.754476 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpwq9\" (UniqueName: \"kubernetes.io/projected/0c19beb3-c10a-4ba4-b200-d6c5cf407047-kube-api-access-qpwq9\") pod \"route-controller-manager-564674b98b-762xs\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:36 crc kubenswrapper[4812]: I1125 16:59:36.757213 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrfbm\" (UniqueName: \"kubernetes.io/projected/e7e7d120-6b99-4530-8997-2cc482190f83-kube-api-access-mrfbm\") pod \"controller-manager-58b9b67b9b-t2rsn\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.528761 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.528798 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.538012 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.541850 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645371 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e7d120-6b99-4530-8997-2cc482190f83-serving-cert\") pod \"e7e7d120-6b99-4530-8997-2cc482190f83\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645458 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-proxy-ca-bundles\") pod \"e7e7d120-6b99-4530-8997-2cc482190f83\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645497 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-client-ca\") pod \"e7e7d120-6b99-4530-8997-2cc482190f83\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645556 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-client-ca\") pod \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645599 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrfbm\" (UniqueName: \"kubernetes.io/projected/e7e7d120-6b99-4530-8997-2cc482190f83-kube-api-access-mrfbm\") pod \"e7e7d120-6b99-4530-8997-2cc482190f83\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645647 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-config\") pod \"e7e7d120-6b99-4530-8997-2cc482190f83\" (UID: \"e7e7d120-6b99-4530-8997-2cc482190f83\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645730 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c19beb3-c10a-4ba4-b200-d6c5cf407047-serving-cert\") pod \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645769 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-config\") pod \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.645864 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpwq9\" (UniqueName: \"kubernetes.io/projected/0c19beb3-c10a-4ba4-b200-d6c5cf407047-kube-api-access-qpwq9\") pod \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\" (UID: \"0c19beb3-c10a-4ba4-b200-d6c5cf407047\") " Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646077 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-client-ca" (OuterVolumeSpecName: "client-ca") pod "e7e7d120-6b99-4530-8997-2cc482190f83" (UID: 
"e7e7d120-6b99-4530-8997-2cc482190f83"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646103 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "e7e7d120-6b99-4530-8997-2cc482190f83" (UID: "e7e7d120-6b99-4530-8997-2cc482190f83"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646127 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-client-ca" (OuterVolumeSpecName: "client-ca") pod "0c19beb3-c10a-4ba4-b200-d6c5cf407047" (UID: "0c19beb3-c10a-4ba4-b200-d6c5cf407047"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646206 4812 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646230 4812 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646245 4812 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646405 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-config" (OuterVolumeSpecName: "config") pod "e7e7d120-6b99-4530-8997-2cc482190f83" (UID: "e7e7d120-6b99-4530-8997-2cc482190f83"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.646564 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-config" (OuterVolumeSpecName: "config") pod "0c19beb3-c10a-4ba4-b200-d6c5cf407047" (UID: "0c19beb3-c10a-4ba4-b200-d6c5cf407047"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.649353 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e7d120-6b99-4530-8997-2cc482190f83-kube-api-access-mrfbm" (OuterVolumeSpecName: "kube-api-access-mrfbm") pod "e7e7d120-6b99-4530-8997-2cc482190f83" (UID: "e7e7d120-6b99-4530-8997-2cc482190f83"). InnerVolumeSpecName "kube-api-access-mrfbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.649370 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c19beb3-c10a-4ba4-b200-d6c5cf407047-kube-api-access-qpwq9" (OuterVolumeSpecName: "kube-api-access-qpwq9") pod "0c19beb3-c10a-4ba4-b200-d6c5cf407047" (UID: "0c19beb3-c10a-4ba4-b200-d6c5cf407047"). InnerVolumeSpecName "kube-api-access-qpwq9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.649433 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c19beb3-c10a-4ba4-b200-d6c5cf407047-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0c19beb3-c10a-4ba4-b200-d6c5cf407047" (UID: "0c19beb3-c10a-4ba4-b200-d6c5cf407047"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.650020 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e7d120-6b99-4530-8997-2cc482190f83-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e7d120-6b99-4530-8997-2cc482190f83" (UID: "e7e7d120-6b99-4530-8997-2cc482190f83"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.747465 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpwq9\" (UniqueName: \"kubernetes.io/projected/0c19beb3-c10a-4ba4-b200-d6c5cf407047-kube-api-access-qpwq9\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.747501 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e7d120-6b99-4530-8997-2cc482190f83-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.747550 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrfbm\" (UniqueName: \"kubernetes.io/projected/e7e7d120-6b99-4530-8997-2cc482190f83-kube-api-access-mrfbm\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.747561 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e7d120-6b99-4530-8997-2cc482190f83-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.747574 4812 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c19beb3-c10a-4ba4-b200-d6c5cf407047-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.747583 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c19beb3-c10a-4ba4-b200-d6c5cf407047-config\") on node \"crc\" DevicePath \"\"" Nov 25 16:59:37 crc kubenswrapper[4812]: I1125 16:59:37.838957 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ae73445-81df-49ec-9c77-da00d65eef40" path="/var/lib/kubelet/pods/6ae73445-81df-49ec-9c77-da00d65eef40/volumes" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.533315 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.533365 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-564674b98b-762xs" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.563751 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-564674b98b-762xs"] Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.568808 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw"] Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.569612 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.572951 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-564674b98b-762xs"] Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.574230 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.574300 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.574437 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.574609 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.574737 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.575788 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.598283 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw"] Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.605787 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn"] Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.610185 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58b9b67b9b-t2rsn"] Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.658251 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a868758a-7763-4bd0-b649-ae3977866c0c-config\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.658307 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cdbb\" (UniqueName: \"kubernetes.io/projected/a868758a-7763-4bd0-b649-ae3977866c0c-kube-api-access-2cdbb\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " 
pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.658355 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a868758a-7763-4bd0-b649-ae3977866c0c-serving-cert\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.658402 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a868758a-7763-4bd0-b649-ae3977866c0c-client-ca\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.759703 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a868758a-7763-4bd0-b649-ae3977866c0c-config\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.759745 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cdbb\" (UniqueName: \"kubernetes.io/projected/a868758a-7763-4bd0-b649-ae3977866c0c-kube-api-access-2cdbb\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.759801 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a868758a-7763-4bd0-b649-ae3977866c0c-serving-cert\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.759829 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a868758a-7763-4bd0-b649-ae3977866c0c-client-ca\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.760812 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a868758a-7763-4bd0-b649-ae3977866c0c-client-ca\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.761181 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a868758a-7763-4bd0-b649-ae3977866c0c-config\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " 
pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.765310 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a868758a-7763-4bd0-b649-ae3977866c0c-serving-cert\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.780104 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cdbb\" (UniqueName: \"kubernetes.io/projected/a868758a-7763-4bd0-b649-ae3977866c0c-kube-api-access-2cdbb\") pod \"route-controller-manager-77bb6fd864-8n8lw\" (UID: \"a868758a-7763-4bd0-b649-ae3977866c0c\") " pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:38 crc kubenswrapper[4812]: I1125 16:59:38.888827 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.183834 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw"] Nov 25 16:59:39 crc kubenswrapper[4812]: W1125 16:59:39.192031 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda868758a_7763_4bd0_b649_ae3977866c0c.slice/crio-5c51e930dd09cfa22d433d9e3a0a6c5ee781bce4957df747ae40050684f08d5f WatchSource:0}: Error finding container 5c51e930dd09cfa22d433d9e3a0a6c5ee781bce4957df747ae40050684f08d5f: Status 404 returned error can't find the container with id 5c51e930dd09cfa22d433d9e3a0a6c5ee781bce4957df747ae40050684f08d5f Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.541491 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" event={"ID":"a868758a-7763-4bd0-b649-ae3977866c0c","Type":"ContainerStarted","Data":"0628238ca3d7d9799d8efafa24aae50c3ccbcf0bfbde6ae38097ba1c461728cd"} Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.541563 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" event={"ID":"a868758a-7763-4bd0-b649-ae3977866c0c","Type":"ContainerStarted","Data":"5c51e930dd09cfa22d433d9e3a0a6c5ee781bce4957df747ae40050684f08d5f"} Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.542152 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.839849 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c19beb3-c10a-4ba4-b200-d6c5cf407047" path="/var/lib/kubelet/pods/0c19beb3-c10a-4ba4-b200-d6c5cf407047/volumes" Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.840726 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e7d120-6b99-4530-8997-2cc482190f83" path="/var/lib/kubelet/pods/e7e7d120-6b99-4530-8997-2cc482190f83/volumes" Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.875145 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" Nov 25 16:59:39 crc kubenswrapper[4812]: I1125 16:59:39.894785 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-77bb6fd864-8n8lw" podStartSLOduration=3.894756095 podStartE2EDuration="3.894756095s" podCreationTimestamp="2025-11-25 16:59:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:59:39.556581747 +0000 UTC m=+754.396723852" watchObservedRunningTime="2025-11-25 16:59:39.894756095 +0000 UTC m=+754.734898190" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.499629 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-76cff95464-dcr8c"] Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.500713 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.505351 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.505372 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.505582 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.505670 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.505805 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.505922 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.510127 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.511857 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76cff95464-dcr8c"] Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.602383 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btbtm\" (UniqueName: \"kubernetes.io/projected/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-kube-api-access-btbtm\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.602469 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-config\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.602567 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-proxy-ca-bundles\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.602594 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-client-ca\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.602678 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-serving-cert\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.704116 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btbtm\" (UniqueName: \"kubernetes.io/projected/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-kube-api-access-btbtm\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.704167 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-config\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.704209 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-proxy-ca-bundles\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.704233 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-client-ca\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.704266 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-serving-cert\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.705419 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-client-ca\") pod 
\"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.705435 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-proxy-ca-bundles\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.705566 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-config\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.709838 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-serving-cert\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.719633 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btbtm\" (UniqueName: \"kubernetes.io/projected/a0f486b1-19ae-4c2e-84ec-8eec8e48b034-kube-api-access-btbtm\") pod \"controller-manager-76cff95464-dcr8c\" (UID: \"a0f486b1-19ae-4c2e-84ec-8eec8e48b034\") " pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.819787 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:41 crc kubenswrapper[4812]: I1125 16:59:41.997676 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76cff95464-dcr8c"] Nov 25 16:59:42 crc kubenswrapper[4812]: W1125 16:59:42.006756 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0f486b1_19ae_4c2e_84ec_8eec8e48b034.slice/crio-eeaeb0bd7d75f3858a4a651133bac3f02bee79848484c269ffcb409badbfa465 WatchSource:0}: Error finding container eeaeb0bd7d75f3858a4a651133bac3f02bee79848484c269ffcb409badbfa465: Status 404 returned error can't find the container with id eeaeb0bd7d75f3858a4a651133bac3f02bee79848484c269ffcb409badbfa465 Nov 25 16:59:42 crc kubenswrapper[4812]: I1125 16:59:42.563610 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" event={"ID":"a0f486b1-19ae-4c2e-84ec-8eec8e48b034","Type":"ContainerStarted","Data":"cf3d9c33cf8f76f44ed9ec5a9d81ca5829911daf9379e1a6a4a07e1d550c82c7"} Nov 25 16:59:42 crc kubenswrapper[4812]: I1125 16:59:42.564209 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:42 crc kubenswrapper[4812]: I1125 16:59:42.564226 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" event={"ID":"a0f486b1-19ae-4c2e-84ec-8eec8e48b034","Type":"ContainerStarted","Data":"eeaeb0bd7d75f3858a4a651133bac3f02bee79848484c269ffcb409badbfa465"} Nov 25 16:59:42 crc kubenswrapper[4812]: I1125 16:59:42.573465 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" Nov 25 16:59:42 crc kubenswrapper[4812]: I1125 16:59:42.587658 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-76cff95464-dcr8c" podStartSLOduration=6.587633262 podStartE2EDuration="6.587633262s" podCreationTimestamp="2025-11-25 16:59:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:59:42.584824614 +0000 UTC m=+757.424966719" watchObservedRunningTime="2025-11-25 16:59:42.587633262 +0000 UTC m=+757.427775377" Nov 25 16:59:42 crc kubenswrapper[4812]: I1125 16:59:42.629412 4812 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 16:59:54 crc kubenswrapper[4812]: I1125 16:59:54.467681 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.177930 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-4qjc2"] Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.180731 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.182599 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc"] Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.183304 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.189717 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.189717 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.189725 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-sl8pv" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.189782 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.212637 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc"] Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.292839 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-reloader\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.292951 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckzjz\" (UniqueName: \"kubernetes.io/projected/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-kube-api-access-ckzjz\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.292992 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-metrics\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.293037 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-startup\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.293056 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-conf\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.293073 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-sockets\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.293267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/682b6133-49c0-4ef7-8756-7939c15f4fd2-cert\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: 
\"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.293325 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xwpw\" (UniqueName: \"kubernetes.io/projected/682b6133-49c0-4ef7-8756-7939c15f4fd2-kube-api-access-2xwpw\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: \"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.293372 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-metrics-certs\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.321257 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-xn4v2"] Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.322508 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.325197 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.326370 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.330172 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.332006 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-h2ltc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.349712 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-d9zhl"] Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.350937 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.354024 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.366068 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-d9zhl"] Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.394850 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xwpw\" (UniqueName: \"kubernetes.io/projected/682b6133-49c0-4ef7-8756-7939c15f4fd2-kube-api-access-2xwpw\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: \"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.394914 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-metrics-certs\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.394945 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-reloader\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395007 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-metrics-certs\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395046 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395067 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w72jh\" (UniqueName: \"kubernetes.io/projected/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-kube-api-access-w72jh\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395085 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckzjz\" (UniqueName: \"kubernetes.io/projected/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-kube-api-access-ckzjz\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395273 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-metrics\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395332 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-metallb-excludel2\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395412 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-startup\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395449 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-conf\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395472 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-sockets\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395495 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/682b6133-49c0-4ef7-8756-7939c15f4fd2-cert\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: \"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395570 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-reloader\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: E1125 16:59:55.395662 4812 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395708 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-metrics\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: E1125 16:59:55.395732 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/682b6133-49c0-4ef7-8756-7939c15f4fd2-cert podName:682b6133-49c0-4ef7-8756-7939c15f4fd2 nodeName:}" failed. No retries permitted until 2025-11-25 16:59:55.895706974 +0000 UTC m=+770.735849069 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/682b6133-49c0-4ef7-8756-7939c15f4fd2-cert") pod "frr-k8s-webhook-server-6998585d5-zp6lc" (UID: "682b6133-49c0-4ef7-8756-7939c15f4fd2") : secret "frr-k8s-webhook-server-cert" not found Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.395933 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-conf\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.396206 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-startup\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.401701 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-metrics-certs\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.412012 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xwpw\" (UniqueName: \"kubernetes.io/projected/682b6133-49c0-4ef7-8756-7939c15f4fd2-kube-api-access-2xwpw\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: \"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.412047 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckzjz\" (UniqueName: \"kubernetes.io/projected/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-kube-api-access-ckzjz\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.444963 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9ed09a4c-ab92-4003-8fc0-5fd406d6abd6-frr-sockets\") pod \"frr-k8s-4qjc2\" (UID: \"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6\") " pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497180 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-metrics-certs\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497237 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497260 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w72jh\" (UniqueName: \"kubernetes.io/projected/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-kube-api-access-w72jh\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 
16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497299 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-metallb-excludel2\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497330 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-metrics-certs\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497346 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-cert\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.497363 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2pkq\" (UniqueName: \"kubernetes.io/projected/afe617cc-82c1-43f2-9978-316b5672f476-kube-api-access-t2pkq\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: E1125 16:59:55.497585 4812 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 16:59:55 crc kubenswrapper[4812]: E1125 16:59:55.497638 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist podName:42a752b2-b9a9-4793-9ce3-1ec7686d1dfc nodeName:}" failed. No retries permitted until 2025-11-25 16:59:55.997618672 +0000 UTC m=+770.837760767 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist") pod "speaker-xn4v2" (UID: "42a752b2-b9a9-4793-9ce3-1ec7686d1dfc") : secret "metallb-memberlist" not found Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.498138 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-4qjc2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.498464 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-metallb-excludel2\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.500966 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-metrics-certs\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.525792 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w72jh\" (UniqueName: \"kubernetes.io/projected/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-kube-api-access-w72jh\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.598633 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-metrics-certs\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.598678 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-cert\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.598699 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2pkq\" (UniqueName: \"kubernetes.io/projected/afe617cc-82c1-43f2-9978-316b5672f476-kube-api-access-t2pkq\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: E1125 16:59:55.598936 4812 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 25 16:59:55 crc kubenswrapper[4812]: E1125 16:59:55.599052 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-metrics-certs podName:afe617cc-82c1-43f2-9978-316b5672f476 nodeName:}" failed. No retries permitted until 2025-11-25 16:59:56.099023827 +0000 UTC m=+770.939165972 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-metrics-certs") pod "controller-6c7b4b5f48-d9zhl" (UID: "afe617cc-82c1-43f2-9978-316b5672f476") : secret "controller-certs-secret" not found Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.601516 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-cert\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.621055 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2pkq\" (UniqueName: \"kubernetes.io/projected/afe617cc-82c1-43f2-9978-316b5672f476-kube-api-access-t2pkq\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.903210 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/682b6133-49c0-4ef7-8756-7939c15f4fd2-cert\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: \"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:55 crc kubenswrapper[4812]: I1125 16:59:55.908631 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/682b6133-49c0-4ef7-8756-7939c15f4fd2-cert\") pod \"frr-k8s-webhook-server-6998585d5-zp6lc\" (UID: \"682b6133-49c0-4ef7-8756-7939c15f4fd2\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.005490 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:56 crc kubenswrapper[4812]: E1125 16:59:56.005756 4812 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 16:59:56 crc kubenswrapper[4812]: E1125 16:59:56.006216 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist podName:42a752b2-b9a9-4793-9ce3-1ec7686d1dfc nodeName:}" failed. No retries permitted until 2025-11-25 16:59:57.006177569 +0000 UTC m=+771.846319704 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist") pod "speaker-xn4v2" (UID: "42a752b2-b9a9-4793-9ce3-1ec7686d1dfc") : secret "metallb-memberlist" not found Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.105421 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.107422 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-metrics-certs\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.111675 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/afe617cc-82c1-43f2-9978-316b5672f476-metrics-certs\") pod \"controller-6c7b4b5f48-d9zhl\" (UID: \"afe617cc-82c1-43f2-9978-316b5672f476\") " pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.264191 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.505997 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc"] Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.641137 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-d9zhl"] Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.667541 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"815c04178d9741d7e2f4ac302052700b1975919746d21f9aa8ad5e04b3da9406"} Nov 25 16:59:56 crc kubenswrapper[4812]: I1125 16:59:56.670175 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" event={"ID":"682b6133-49c0-4ef7-8756-7939c15f4fd2","Type":"ContainerStarted","Data":"37a0ffd8e523cb1f83407190cce25d7cae64ed2b308e9549ce197b6e022ab312"} Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.022347 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.028163 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/42a752b2-b9a9-4793-9ce3-1ec7686d1dfc-memberlist\") pod \"speaker-xn4v2\" (UID: \"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc\") " pod="metallb-system/speaker-xn4v2" Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.144424 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-xn4v2" Nov 25 16:59:57 crc kubenswrapper[4812]: W1125 16:59:57.177058 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42a752b2_b9a9_4793_9ce3_1ec7686d1dfc.slice/crio-7d36364e7118087f201d4faa8a2823c4f09a0ee7f3de8926099050ca4c594076 WatchSource:0}: Error finding container 7d36364e7118087f201d4faa8a2823c4f09a0ee7f3de8926099050ca4c594076: Status 404 returned error can't find the container with id 7d36364e7118087f201d4faa8a2823c4f09a0ee7f3de8926099050ca4c594076 Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.333060 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.333131 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.691518 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xn4v2" event={"ID":"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc","Type":"ContainerStarted","Data":"8de61bee945f51aec033fa5c3f8e3b6f17376ac89aea2ab75aec134008ac3c3b"} Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.691655 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xn4v2" event={"ID":"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc","Type":"ContainerStarted","Data":"7d36364e7118087f201d4faa8a2823c4f09a0ee7f3de8926099050ca4c594076"} Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.704439 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-d9zhl" event={"ID":"afe617cc-82c1-43f2-9978-316b5672f476","Type":"ContainerStarted","Data":"d8a00f7d7aad3a970718ad96a37610ed1aab53db51ffc3c940450c8141a03e02"} Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.704497 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-d9zhl" event={"ID":"afe617cc-82c1-43f2-9978-316b5672f476","Type":"ContainerStarted","Data":"2469c676855e5103dff21ed49a622ef6b160c22b72ea44ed2d02da9b8c3878a9"} Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.704510 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-d9zhl" event={"ID":"afe617cc-82c1-43f2-9978-316b5672f476","Type":"ContainerStarted","Data":"b6ce67026f10be2ef56427b098292c52f1966ce4d04e9a02c7b1ccfa1e4c4eae"} Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.705650 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 16:59:57 crc kubenswrapper[4812]: I1125 16:59:57.741695 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-d9zhl" podStartSLOduration=2.741670431 podStartE2EDuration="2.741670431s" podCreationTimestamp="2025-11-25 16:59:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:59:57.73839922 
+0000 UTC m=+772.578541315" watchObservedRunningTime="2025-11-25 16:59:57.741670431 +0000 UTC m=+772.581812526" Nov 25 16:59:58 crc kubenswrapper[4812]: I1125 16:59:58.717912 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xn4v2" event={"ID":"42a752b2-b9a9-4793-9ce3-1ec7686d1dfc","Type":"ContainerStarted","Data":"042ef66306f8fa2ca04f5b4c32755e84783f16f9755e801292745d031d259cf8"} Nov 25 16:59:58 crc kubenswrapper[4812]: I1125 16:59:58.718400 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-xn4v2" Nov 25 16:59:58 crc kubenswrapper[4812]: I1125 16:59:58.735628 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-xn4v2" podStartSLOduration=3.73560806 podStartE2EDuration="3.73560806s" podCreationTimestamp="2025-11-25 16:59:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 16:59:58.73417111 +0000 UTC m=+773.574313205" watchObservedRunningTime="2025-11-25 16:59:58.73560806 +0000 UTC m=+773.575750155" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.128982 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89"] Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.131006 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.137127 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89"] Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.137685 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.137930 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.270902 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eeffb98c-9b48-40c2-9c30-465876bb1a05-config-volume\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.270943 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eeffb98c-9b48-40c2-9c30-465876bb1a05-secret-volume\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.271020 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbkcs\" (UniqueName: \"kubernetes.io/projected/eeffb98c-9b48-40c2-9c30-465876bb1a05-kube-api-access-zbkcs\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.372894 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eeffb98c-9b48-40c2-9c30-465876bb1a05-config-volume\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.372937 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eeffb98c-9b48-40c2-9c30-465876bb1a05-secret-volume\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.372978 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbkcs\" (UniqueName: \"kubernetes.io/projected/eeffb98c-9b48-40c2-9c30-465876bb1a05-kube-api-access-zbkcs\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.375441 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eeffb98c-9b48-40c2-9c30-465876bb1a05-config-volume\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.379346 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eeffb98c-9b48-40c2-9c30-465876bb1a05-secret-volume\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.402606 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbkcs\" (UniqueName: \"kubernetes.io/projected/eeffb98c-9b48-40c2-9c30-465876bb1a05-kube-api-access-zbkcs\") pod \"collect-profiles-29401500-4nh89\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:00 crc kubenswrapper[4812]: I1125 17:00:00.458641 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.280548 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89"] Nov 25 17:00:02 crc kubenswrapper[4812]: W1125 17:00:02.287308 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeeffb98c_9b48_40c2_9c30_465876bb1a05.slice/crio-b56e0e09f6430c4ba4cce0ac31686b48db857981aeffb21ed3d7399ed0b48d47 WatchSource:0}: Error finding container b56e0e09f6430c4ba4cce0ac31686b48db857981aeffb21ed3d7399ed0b48d47: Status 404 returned error can't find the container with id b56e0e09f6430c4ba4cce0ac31686b48db857981aeffb21ed3d7399ed0b48d47 Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.739390 4812 generic.go:334] "Generic (PLEG): container finished" podID="eeffb98c-9b48-40c2-9c30-465876bb1a05" containerID="88ba5a2fad5cc62974f0cacc31b9da5955276132b1de7bf8729ff065d5ae5df2" exitCode=0 Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.739515 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" event={"ID":"eeffb98c-9b48-40c2-9c30-465876bb1a05","Type":"ContainerDied","Data":"88ba5a2fad5cc62974f0cacc31b9da5955276132b1de7bf8729ff065d5ae5df2"} Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.739609 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" event={"ID":"eeffb98c-9b48-40c2-9c30-465876bb1a05","Type":"ContainerStarted","Data":"b56e0e09f6430c4ba4cce0ac31686b48db857981aeffb21ed3d7399ed0b48d47"} Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.741553 4812 generic.go:334] "Generic (PLEG): container finished" podID="9ed09a4c-ab92-4003-8fc0-5fd406d6abd6" containerID="36e651bfaf4f3b322cc195cffec69887d26544b3911c2000e4763d13b9bb1faf" exitCode=0 Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.741637 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerDied","Data":"36e651bfaf4f3b322cc195cffec69887d26544b3911c2000e4763d13b9bb1faf"} Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.743265 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" event={"ID":"682b6133-49c0-4ef7-8756-7939c15f4fd2","Type":"ContainerStarted","Data":"878a23cfceb2e98687469df6e30dd5b79b55537d67da4808b3f5c6d879b96b3e"} Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.743562 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 17:00:02 crc kubenswrapper[4812]: I1125 17:00:02.799182 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" podStartSLOduration=2.20168216 podStartE2EDuration="7.799150383s" podCreationTimestamp="2025-11-25 16:59:55 +0000 UTC" firstStartedPulling="2025-11-25 16:59:56.514816406 +0000 UTC m=+771.354958501" lastFinishedPulling="2025-11-25 17:00:02.112284629 +0000 UTC m=+776.952426724" observedRunningTime="2025-11-25 17:00:02.795584065 +0000 UTC m=+777.635726160" watchObservedRunningTime="2025-11-25 17:00:02.799150383 +0000 UTC m=+777.639292478" Nov 25 17:00:03 crc kubenswrapper[4812]: I1125 17:00:03.751253 
4812 generic.go:334] "Generic (PLEG): container finished" podID="9ed09a4c-ab92-4003-8fc0-5fd406d6abd6" containerID="eb020a4f4b7a2683690277d522b6477fa58b8c89ac9927a369a9e29e5b69ca59" exitCode=0 Nov 25 17:00:03 crc kubenswrapper[4812]: I1125 17:00:03.751326 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerDied","Data":"eb020a4f4b7a2683690277d522b6477fa58b8c89ac9927a369a9e29e5b69ca59"} Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.095208 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.247712 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eeffb98c-9b48-40c2-9c30-465876bb1a05-secret-volume\") pod \"eeffb98c-9b48-40c2-9c30-465876bb1a05\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.247810 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbkcs\" (UniqueName: \"kubernetes.io/projected/eeffb98c-9b48-40c2-9c30-465876bb1a05-kube-api-access-zbkcs\") pod \"eeffb98c-9b48-40c2-9c30-465876bb1a05\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.247875 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eeffb98c-9b48-40c2-9c30-465876bb1a05-config-volume\") pod \"eeffb98c-9b48-40c2-9c30-465876bb1a05\" (UID: \"eeffb98c-9b48-40c2-9c30-465876bb1a05\") " Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.248562 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eeffb98c-9b48-40c2-9c30-465876bb1a05-config-volume" (OuterVolumeSpecName: "config-volume") pod "eeffb98c-9b48-40c2-9c30-465876bb1a05" (UID: "eeffb98c-9b48-40c2-9c30-465876bb1a05"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.253414 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eeffb98c-9b48-40c2-9c30-465876bb1a05-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "eeffb98c-9b48-40c2-9c30-465876bb1a05" (UID: "eeffb98c-9b48-40c2-9c30-465876bb1a05"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.253724 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eeffb98c-9b48-40c2-9c30-465876bb1a05-kube-api-access-zbkcs" (OuterVolumeSpecName: "kube-api-access-zbkcs") pod "eeffb98c-9b48-40c2-9c30-465876bb1a05" (UID: "eeffb98c-9b48-40c2-9c30-465876bb1a05"). InnerVolumeSpecName "kube-api-access-zbkcs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.349325 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbkcs\" (UniqueName: \"kubernetes.io/projected/eeffb98c-9b48-40c2-9c30-465876bb1a05-kube-api-access-zbkcs\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.349363 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/eeffb98c-9b48-40c2-9c30-465876bb1a05-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.349372 4812 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/eeffb98c-9b48-40c2-9c30-465876bb1a05-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.757866 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" event={"ID":"eeffb98c-9b48-40c2-9c30-465876bb1a05","Type":"ContainerDied","Data":"b56e0e09f6430c4ba4cce0ac31686b48db857981aeffb21ed3d7399ed0b48d47"} Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.757907 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.757911 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b56e0e09f6430c4ba4cce0ac31686b48db857981aeffb21ed3d7399ed0b48d47" Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.759816 4812 generic.go:334] "Generic (PLEG): container finished" podID="9ed09a4c-ab92-4003-8fc0-5fd406d6abd6" containerID="4f4a12f6f5acbe6b391923573c77c83a986f2c1c90b7222f38077263f4d80222" exitCode=0 Nov 25 17:00:04 crc kubenswrapper[4812]: I1125 17:00:04.759842 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerDied","Data":"4f4a12f6f5acbe6b391923573c77c83a986f2c1c90b7222f38077263f4d80222"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771372 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"2089c4ca26b30767d0eff4c4b664e13d229d72fb47d9a0485ba14b57d32658f7"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771460 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"cf1e60c0d63073a90805b2be3bb7d97e4467db62dcd387b6ab0a7894cac7ac55"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771471 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"8f1b8af65b8fbba3c406b153b5a4a8de9ff9325706b4c298d1700a05bd17a7d4"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771481 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"a8ee99a4fa725a7c874003ddcdd7104a5c07eb3f0fbd9ec88823800da4ed5054"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771492 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"b20247d6b278b938dfcc46d57dc43564b1b308cc9edebd9f51121b559c1e4572"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771502 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4qjc2" event={"ID":"9ed09a4c-ab92-4003-8fc0-5fd406d6abd6","Type":"ContainerStarted","Data":"31e496e611cf10d7949ebfce6e476c7589dc8d236d7ec208fe0f1252776d20ed"} Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.771576 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-4qjc2" Nov 25 17:00:05 crc kubenswrapper[4812]: I1125 17:00:05.794039 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-4qjc2" podStartSLOduration=4.382862527 podStartE2EDuration="10.794020294s" podCreationTimestamp="2025-11-25 16:59:55 +0000 UTC" firstStartedPulling="2025-11-25 16:59:55.71548831 +0000 UTC m=+770.555630405" lastFinishedPulling="2025-11-25 17:00:02.126646077 +0000 UTC m=+776.966788172" observedRunningTime="2025-11-25 17:00:05.790253929 +0000 UTC m=+780.630396034" watchObservedRunningTime="2025-11-25 17:00:05.794020294 +0000 UTC m=+780.634162389" Nov 25 17:00:06 crc kubenswrapper[4812]: I1125 17:00:06.268719 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-d9zhl" Nov 25 17:00:07 crc kubenswrapper[4812]: I1125 17:00:07.151265 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-xn4v2" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.927907 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-d65st"] Nov 25 17:00:09 crc kubenswrapper[4812]: E1125 17:00:09.928227 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeffb98c-9b48-40c2-9c30-465876bb1a05" containerName="collect-profiles" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.928245 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeffb98c-9b48-40c2-9c30-465876bb1a05" containerName="collect-profiles" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.928382 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeffb98c-9b48-40c2-9c30-465876bb1a05" containerName="collect-profiles" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.928878 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.931521 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.932781 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-jfwbj" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.933823 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 17:00:09 crc kubenswrapper[4812]: I1125 17:00:09.940264 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d65st"] Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.027211 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8vc6\" (UniqueName: \"kubernetes.io/projected/483b17a4-3e20-4968-b2af-69a4a254a15c-kube-api-access-w8vc6\") pod \"openstack-operator-index-d65st\" (UID: \"483b17a4-3e20-4968-b2af-69a4a254a15c\") " pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.128680 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8vc6\" (UniqueName: \"kubernetes.io/projected/483b17a4-3e20-4968-b2af-69a4a254a15c-kube-api-access-w8vc6\") pod \"openstack-operator-index-d65st\" (UID: \"483b17a4-3e20-4968-b2af-69a4a254a15c\") " pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.154562 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8vc6\" (UniqueName: \"kubernetes.io/projected/483b17a4-3e20-4968-b2af-69a4a254a15c-kube-api-access-w8vc6\") pod \"openstack-operator-index-d65st\" (UID: \"483b17a4-3e20-4968-b2af-69a4a254a15c\") " pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.246734 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.498353 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-4qjc2" Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.535999 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-4qjc2" Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.621250 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d65st"] Nov 25 17:00:10 crc kubenswrapper[4812]: W1125 17:00:10.625191 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod483b17a4_3e20_4968_b2af_69a4a254a15c.slice/crio-4f3e25bd07b5a3d5ad26f93ae15c142b3b47a932fbe430878341cc24ab37b580 WatchSource:0}: Error finding container 4f3e25bd07b5a3d5ad26f93ae15c142b3b47a932fbe430878341cc24ab37b580: Status 404 returned error can't find the container with id 4f3e25bd07b5a3d5ad26f93ae15c142b3b47a932fbe430878341cc24ab37b580 Nov 25 17:00:10 crc kubenswrapper[4812]: I1125 17:00:10.799506 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d65st" event={"ID":"483b17a4-3e20-4968-b2af-69a4a254a15c","Type":"ContainerStarted","Data":"4f3e25bd07b5a3d5ad26f93ae15c142b3b47a932fbe430878341cc24ab37b580"} Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.112141 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-d65st"] Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.717421 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-hnrcp"] Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.718786 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.726227 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hnrcp"] Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.783622 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8sqd\" (UniqueName: \"kubernetes.io/projected/ea42a564-ecfe-440d-9fc3-22f52b0296af-kube-api-access-m8sqd\") pod \"openstack-operator-index-hnrcp\" (UID: \"ea42a564-ecfe-440d-9fc3-22f52b0296af\") " pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.819553 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d65st" event={"ID":"483b17a4-3e20-4968-b2af-69a4a254a15c","Type":"ContainerStarted","Data":"c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890"} Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.835359 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-d65st" podStartSLOduration=2.605638281 podStartE2EDuration="4.835330127s" podCreationTimestamp="2025-11-25 17:00:09 +0000 UTC" firstStartedPulling="2025-11-25 17:00:10.628155165 +0000 UTC m=+785.468297260" lastFinishedPulling="2025-11-25 17:00:12.857847021 +0000 UTC m=+787.697989106" observedRunningTime="2025-11-25 17:00:13.834682172 +0000 UTC m=+788.674824317" watchObservedRunningTime="2025-11-25 17:00:13.835330127 +0000 UTC m=+788.675472222" Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.884681 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8sqd\" (UniqueName: \"kubernetes.io/projected/ea42a564-ecfe-440d-9fc3-22f52b0296af-kube-api-access-m8sqd\") pod \"openstack-operator-index-hnrcp\" (UID: \"ea42a564-ecfe-440d-9fc3-22f52b0296af\") " pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:13 crc kubenswrapper[4812]: I1125 17:00:13.906705 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8sqd\" (UniqueName: \"kubernetes.io/projected/ea42a564-ecfe-440d-9fc3-22f52b0296af-kube-api-access-m8sqd\") pod \"openstack-operator-index-hnrcp\" (UID: \"ea42a564-ecfe-440d-9fc3-22f52b0296af\") " pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:14 crc kubenswrapper[4812]: I1125 17:00:14.036350 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:14 crc kubenswrapper[4812]: I1125 17:00:14.440709 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hnrcp"] Nov 25 17:00:14 crc kubenswrapper[4812]: W1125 17:00:14.450714 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea42a564_ecfe_440d_9fc3_22f52b0296af.slice/crio-0096774bc7f3dac28de105b7628ebc68e4a7bbe7e2fe8fc3981e7575d3799651 WatchSource:0}: Error finding container 0096774bc7f3dac28de105b7628ebc68e4a7bbe7e2fe8fc3981e7575d3799651: Status 404 returned error can't find the container with id 0096774bc7f3dac28de105b7628ebc68e4a7bbe7e2fe8fc3981e7575d3799651 Nov 25 17:00:14 crc kubenswrapper[4812]: I1125 17:00:14.827367 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hnrcp" event={"ID":"ea42a564-ecfe-440d-9fc3-22f52b0296af","Type":"ContainerStarted","Data":"82bd6c71a4f04dfffa177f929bad9d55c2d3439ddf8a5576ef62e5964a2b2406"} Nov 25 17:00:14 crc kubenswrapper[4812]: I1125 17:00:14.827429 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hnrcp" event={"ID":"ea42a564-ecfe-440d-9fc3-22f52b0296af","Type":"ContainerStarted","Data":"0096774bc7f3dac28de105b7628ebc68e4a7bbe7e2fe8fc3981e7575d3799651"} Nov 25 17:00:14 crc kubenswrapper[4812]: I1125 17:00:14.827564 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-d65st" podUID="483b17a4-3e20-4968-b2af-69a4a254a15c" containerName="registry-server" containerID="cri-o://c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890" gracePeriod=2 Nov 25 17:00:14 crc kubenswrapper[4812]: I1125 17:00:14.853291 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-hnrcp" podStartSLOduration=1.812423844 podStartE2EDuration="1.853261425s" podCreationTimestamp="2025-11-25 17:00:13 +0000 UTC" firstStartedPulling="2025-11-25 17:00:14.453721564 +0000 UTC m=+789.293863659" lastFinishedPulling="2025-11-25 17:00:14.494559145 +0000 UTC m=+789.334701240" observedRunningTime="2025-11-25 17:00:14.845934341 +0000 UTC m=+789.686076446" watchObservedRunningTime="2025-11-25 17:00:14.853261425 +0000 UTC m=+789.693403540" Nov 25 17:00:14 crc kubenswrapper[4812]: E1125 17:00:14.949519 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod483b17a4_3e20_4968_b2af_69a4a254a15c.slice/crio-conmon-c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.251546 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.404460 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8vc6\" (UniqueName: \"kubernetes.io/projected/483b17a4-3e20-4968-b2af-69a4a254a15c-kube-api-access-w8vc6\") pod \"483b17a4-3e20-4968-b2af-69a4a254a15c\" (UID: \"483b17a4-3e20-4968-b2af-69a4a254a15c\") " Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.409820 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/483b17a4-3e20-4968-b2af-69a4a254a15c-kube-api-access-w8vc6" (OuterVolumeSpecName: "kube-api-access-w8vc6") pod "483b17a4-3e20-4968-b2af-69a4a254a15c" (UID: "483b17a4-3e20-4968-b2af-69a4a254a15c"). InnerVolumeSpecName "kube-api-access-w8vc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.501390 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-4qjc2" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.505996 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8vc6\" (UniqueName: \"kubernetes.io/projected/483b17a4-3e20-4968-b2af-69a4a254a15c-kube-api-access-w8vc6\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.835427 4812 generic.go:334] "Generic (PLEG): container finished" podID="483b17a4-3e20-4968-b2af-69a4a254a15c" containerID="c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890" exitCode=0 Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.835809 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d65st" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.839241 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d65st" event={"ID":"483b17a4-3e20-4968-b2af-69a4a254a15c","Type":"ContainerDied","Data":"c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890"} Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.839379 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d65st" event={"ID":"483b17a4-3e20-4968-b2af-69a4a254a15c","Type":"ContainerDied","Data":"4f3e25bd07b5a3d5ad26f93ae15c142b3b47a932fbe430878341cc24ab37b580"} Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.839405 4812 scope.go:117] "RemoveContainer" containerID="c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.857236 4812 scope.go:117] "RemoveContainer" containerID="c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890" Nov 25 17:00:15 crc kubenswrapper[4812]: E1125 17:00:15.858129 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890\": container with ID starting with c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890 not found: ID does not exist" containerID="c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.858176 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890"} err="failed to get 
container status \"c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890\": rpc error: code = NotFound desc = could not find container \"c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890\": container with ID starting with c2289907c104bec810ca67ea949acb01b9227d065b93052742d296d7dce76890 not found: ID does not exist" Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.875029 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-d65st"] Nov 25 17:00:15 crc kubenswrapper[4812]: I1125 17:00:15.879692 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-d65st"] Nov 25 17:00:16 crc kubenswrapper[4812]: I1125 17:00:16.111933 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zp6lc" Nov 25 17:00:17 crc kubenswrapper[4812]: I1125 17:00:17.844208 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="483b17a4-3e20-4968-b2af-69a4a254a15c" path="/var/lib/kubelet/pods/483b17a4-3e20-4968-b2af-69a4a254a15c/volumes" Nov 25 17:00:24 crc kubenswrapper[4812]: I1125 17:00:24.036737 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:24 crc kubenswrapper[4812]: I1125 17:00:24.037188 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:24 crc kubenswrapper[4812]: I1125 17:00:24.057765 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:24 crc kubenswrapper[4812]: I1125 17:00:24.913136 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-hnrcp" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.552693 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2"] Nov 25 17:00:25 crc kubenswrapper[4812]: E1125 17:00:25.553255 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="483b17a4-3e20-4968-b2af-69a4a254a15c" containerName="registry-server" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.553269 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="483b17a4-3e20-4968-b2af-69a4a254a15c" containerName="registry-server" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.553388 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="483b17a4-3e20-4968-b2af-69a4a254a15c" containerName="registry-server" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.554280 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.556177 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-g2bkl" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.562414 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2"] Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.654582 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf46l\" (UniqueName: \"kubernetes.io/projected/3597e30c-17c6-408b-b584-fe1a9907359a-kube-api-access-gf46l\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.654682 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-util\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.654761 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-bundle\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.756850 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf46l\" (UniqueName: \"kubernetes.io/projected/3597e30c-17c6-408b-b584-fe1a9907359a-kube-api-access-gf46l\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.756955 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-util\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.757017 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-bundle\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.757624 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-util\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.757659 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-bundle\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.778489 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf46l\" (UniqueName: \"kubernetes.io/projected/3597e30c-17c6-408b-b584-fe1a9907359a-kube-api-access-gf46l\") pod \"108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:25 crc kubenswrapper[4812]: I1125 17:00:25.882904 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:26 crc kubenswrapper[4812]: I1125 17:00:26.279822 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2"] Nov 25 17:00:26 crc kubenswrapper[4812]: W1125 17:00:26.283614 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3597e30c_17c6_408b_b584_fe1a9907359a.slice/crio-50ff62bf4c4acc2faefebe116c674085cbfcabc0faf79ae7316395b30e218cf4 WatchSource:0}: Error finding container 50ff62bf4c4acc2faefebe116c674085cbfcabc0faf79ae7316395b30e218cf4: Status 404 returned error can't find the container with id 50ff62bf4c4acc2faefebe116c674085cbfcabc0faf79ae7316395b30e218cf4 Nov 25 17:00:26 crc kubenswrapper[4812]: I1125 17:00:26.897994 4812 generic.go:334] "Generic (PLEG): container finished" podID="3597e30c-17c6-408b-b584-fe1a9907359a" containerID="cb86f6c4dd5efe9760e71be79e9d7cba713a823559d47bc03006c3dfefd8b63d" exitCode=0 Nov 25 17:00:26 crc kubenswrapper[4812]: I1125 17:00:26.898057 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" event={"ID":"3597e30c-17c6-408b-b584-fe1a9907359a","Type":"ContainerDied","Data":"cb86f6c4dd5efe9760e71be79e9d7cba713a823559d47bc03006c3dfefd8b63d"} Nov 25 17:00:26 crc kubenswrapper[4812]: I1125 17:00:26.898269 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" event={"ID":"3597e30c-17c6-408b-b584-fe1a9907359a","Type":"ContainerStarted","Data":"50ff62bf4c4acc2faefebe116c674085cbfcabc0faf79ae7316395b30e218cf4"} Nov 25 17:00:27 crc kubenswrapper[4812]: I1125 17:00:27.332945 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:00:27 crc kubenswrapper[4812]: I1125 
17:00:27.333045 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:00:27 crc kubenswrapper[4812]: I1125 17:00:27.906208 4812 generic.go:334] "Generic (PLEG): container finished" podID="3597e30c-17c6-408b-b584-fe1a9907359a" containerID="3ae039d60c6562f36d7724006d1e8affba12ae57ace896d7040a0dbf5561bc35" exitCode=0 Nov 25 17:00:27 crc kubenswrapper[4812]: I1125 17:00:27.906284 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" event={"ID":"3597e30c-17c6-408b-b584-fe1a9907359a","Type":"ContainerDied","Data":"3ae039d60c6562f36d7724006d1e8affba12ae57ace896d7040a0dbf5561bc35"} Nov 25 17:00:28 crc kubenswrapper[4812]: I1125 17:00:28.920646 4812 generic.go:334] "Generic (PLEG): container finished" podID="3597e30c-17c6-408b-b584-fe1a9907359a" containerID="f0f7d7378915f37d0c2cc5efc248af5f5bcfef2f2c8472e5b2f8c0d1d586836b" exitCode=0 Nov 25 17:00:28 crc kubenswrapper[4812]: I1125 17:00:28.920734 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" event={"ID":"3597e30c-17c6-408b-b584-fe1a9907359a","Type":"ContainerDied","Data":"f0f7d7378915f37d0c2cc5efc248af5f5bcfef2f2c8472e5b2f8c0d1d586836b"} Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.530966 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpwc"] Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.533455 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.538351 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpwc"] Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.613756 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-utilities\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.613854 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-catalog-content\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.613930 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ftvg\" (UniqueName: \"kubernetes.io/projected/655c4c3b-beac-4aef-a366-556c01011bdc-kube-api-access-4ftvg\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.715340 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ftvg\" (UniqueName: \"kubernetes.io/projected/655c4c3b-beac-4aef-a366-556c01011bdc-kube-api-access-4ftvg\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.715414 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-utilities\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.715464 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-catalog-content\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.715979 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-catalog-content\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.716430 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-utilities\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.736673 4812 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4ftvg\" (UniqueName: \"kubernetes.io/projected/655c4c3b-beac-4aef-a366-556c01011bdc-kube-api-access-4ftvg\") pod \"redhat-marketplace-bgpwc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") " pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:29 crc kubenswrapper[4812]: I1125 17:00:29.850456 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.207798 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.237540 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpwc"] Nov 25 17:00:30 crc kubenswrapper[4812]: W1125 17:00:30.242860 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod655c4c3b_beac_4aef_a366_556c01011bdc.slice/crio-2cda7c46939bd972b6e8a8b38e79fab360d42880fd75ec25f9df5a5f479eddc6 WatchSource:0}: Error finding container 2cda7c46939bd972b6e8a8b38e79fab360d42880fd75ec25f9df5a5f479eddc6: Status 404 returned error can't find the container with id 2cda7c46939bd972b6e8a8b38e79fab360d42880fd75ec25f9df5a5f479eddc6 Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.324015 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf46l\" (UniqueName: \"kubernetes.io/projected/3597e30c-17c6-408b-b584-fe1a9907359a-kube-api-access-gf46l\") pod \"3597e30c-17c6-408b-b584-fe1a9907359a\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.324190 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-util\") pod \"3597e30c-17c6-408b-b584-fe1a9907359a\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.324247 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-bundle\") pod \"3597e30c-17c6-408b-b584-fe1a9907359a\" (UID: \"3597e30c-17c6-408b-b584-fe1a9907359a\") " Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.325124 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-bundle" (OuterVolumeSpecName: "bundle") pod "3597e30c-17c6-408b-b584-fe1a9907359a" (UID: "3597e30c-17c6-408b-b584-fe1a9907359a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.329316 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3597e30c-17c6-408b-b584-fe1a9907359a-kube-api-access-gf46l" (OuterVolumeSpecName: "kube-api-access-gf46l") pod "3597e30c-17c6-408b-b584-fe1a9907359a" (UID: "3597e30c-17c6-408b-b584-fe1a9907359a"). InnerVolumeSpecName "kube-api-access-gf46l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.338042 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-util" (OuterVolumeSpecName: "util") pod "3597e30c-17c6-408b-b584-fe1a9907359a" (UID: "3597e30c-17c6-408b-b584-fe1a9907359a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.425843 4812 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.425882 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf46l\" (UniqueName: \"kubernetes.io/projected/3597e30c-17c6-408b-b584-fe1a9907359a-kube-api-access-gf46l\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.425895 4812 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3597e30c-17c6-408b-b584-fe1a9907359a-util\") on node \"crc\" DevicePath \"\"" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.935042 4812 generic.go:334] "Generic (PLEG): container finished" podID="655c4c3b-beac-4aef-a366-556c01011bdc" containerID="fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667" exitCode=0 Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.935154 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpwc" event={"ID":"655c4c3b-beac-4aef-a366-556c01011bdc","Type":"ContainerDied","Data":"fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667"} Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.935214 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpwc" event={"ID":"655c4c3b-beac-4aef-a366-556c01011bdc","Type":"ContainerStarted","Data":"2cda7c46939bd972b6e8a8b38e79fab360d42880fd75ec25f9df5a5f479eddc6"} Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.937749 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" event={"ID":"3597e30c-17c6-408b-b584-fe1a9907359a","Type":"ContainerDied","Data":"50ff62bf4c4acc2faefebe116c674085cbfcabc0faf79ae7316395b30e218cf4"} Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.937792 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50ff62bf4c4acc2faefebe116c674085cbfcabc0faf79ae7316395b30e218cf4" Nov 25 17:00:30 crc kubenswrapper[4812]: I1125 17:00:30.937890 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/108d5cdaa24cb5b2d5ae90ff353b9bcddac02d61d124585e89f7ceb41cn8cw2" Nov 25 17:00:32 crc kubenswrapper[4812]: I1125 17:00:32.954828 4812 generic.go:334] "Generic (PLEG): container finished" podID="655c4c3b-beac-4aef-a366-556c01011bdc" containerID="7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459" exitCode=0 Nov 25 17:00:32 crc kubenswrapper[4812]: I1125 17:00:32.954949 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpwc" event={"ID":"655c4c3b-beac-4aef-a366-556c01011bdc","Type":"ContainerDied","Data":"7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459"} Nov 25 17:00:33 crc kubenswrapper[4812]: I1125 17:00:33.962889 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpwc" event={"ID":"655c4c3b-beac-4aef-a366-556c01011bdc","Type":"ContainerStarted","Data":"9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2"} Nov 25 17:00:33 crc kubenswrapper[4812]: I1125 17:00:33.979892 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bgpwc" podStartSLOduration=2.5445853080000003 podStartE2EDuration="4.979867113s" podCreationTimestamp="2025-11-25 17:00:29 +0000 UTC" firstStartedPulling="2025-11-25 17:00:30.936948017 +0000 UTC m=+805.777090112" lastFinishedPulling="2025-11-25 17:00:33.372229822 +0000 UTC m=+808.212371917" observedRunningTime="2025-11-25 17:00:33.978346447 +0000 UTC m=+808.818488552" watchObservedRunningTime="2025-11-25 17:00:33.979867113 +0000 UTC m=+808.820009208" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.314179 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x"] Nov 25 17:00:35 crc kubenswrapper[4812]: E1125 17:00:35.314816 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="util" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.314831 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="util" Nov 25 17:00:35 crc kubenswrapper[4812]: E1125 17:00:35.314847 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="extract" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.314853 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="extract" Nov 25 17:00:35 crc kubenswrapper[4812]: E1125 17:00:35.314868 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="pull" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.314874 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="pull" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.314986 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3597e30c-17c6-408b-b584-fe1a9907359a" containerName="extract" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.315710 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.318105 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-wl74c" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.336847 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x"] Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.497338 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxwzz\" (UniqueName: \"kubernetes.io/projected/2023c319-572e-4d1d-bb2a-56e3842430db-kube-api-access-jxwzz\") pod \"openstack-operator-controller-operator-55cd74b98b-lcn4x\" (UID: \"2023c319-572e-4d1d-bb2a-56e3842430db\") " pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.598512 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxwzz\" (UniqueName: \"kubernetes.io/projected/2023c319-572e-4d1d-bb2a-56e3842430db-kube-api-access-jxwzz\") pod \"openstack-operator-controller-operator-55cd74b98b-lcn4x\" (UID: \"2023c319-572e-4d1d-bb2a-56e3842430db\") " pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.619843 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxwzz\" (UniqueName: \"kubernetes.io/projected/2023c319-572e-4d1d-bb2a-56e3842430db-kube-api-access-jxwzz\") pod \"openstack-operator-controller-operator-55cd74b98b-lcn4x\" (UID: \"2023c319-572e-4d1d-bb2a-56e3842430db\") " pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:00:35 crc kubenswrapper[4812]: I1125 17:00:35.631239 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:00:36 crc kubenswrapper[4812]: I1125 17:00:36.032551 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x"] Nov 25 17:00:36 crc kubenswrapper[4812]: W1125 17:00:36.036778 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2023c319_572e_4d1d_bb2a_56e3842430db.slice/crio-bd01eeab65e0311601db23637b89ce2ade1ca953f5ad9bbe7950741efd21f839 WatchSource:0}: Error finding container bd01eeab65e0311601db23637b89ce2ade1ca953f5ad9bbe7950741efd21f839: Status 404 returned error can't find the container with id bd01eeab65e0311601db23637b89ce2ade1ca953f5ad9bbe7950741efd21f839 Nov 25 17:00:36 crc kubenswrapper[4812]: I1125 17:00:36.980437 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" event={"ID":"2023c319-572e-4d1d-bb2a-56e3842430db","Type":"ContainerStarted","Data":"bd01eeab65e0311601db23637b89ce2ade1ca953f5ad9bbe7950741efd21f839"} Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.322485 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8n7t6"] Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.324204 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.336864 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8n7t6"] Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.456175 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-catalog-content\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.456254 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lhrk\" (UniqueName: \"kubernetes.io/projected/2e8a6c88-4bd4-488f-821d-36c9cb50399a-kube-api-access-7lhrk\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.456342 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-utilities\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.558124 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lhrk\" (UniqueName: \"kubernetes.io/projected/2e8a6c88-4bd4-488f-821d-36c9cb50399a-kube-api-access-7lhrk\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.558236 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-utilities\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.558276 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-catalog-content\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.559206 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-catalog-content\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.559722 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-utilities\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.576659 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7lhrk\" (UniqueName: \"kubernetes.io/projected/2e8a6c88-4bd4-488f-821d-36c9cb50399a-kube-api-access-7lhrk\") pod \"certified-operators-8n7t6\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") " pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.647136 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8n7t6" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.852132 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.852519 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:39 crc kubenswrapper[4812]: I1125 17:00:39.898464 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:40 crc kubenswrapper[4812]: I1125 17:00:40.018736 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" event={"ID":"2023c319-572e-4d1d-bb2a-56e3842430db","Type":"ContainerStarted","Data":"ec8b03fb1be3d0d049a1d713c6a34ceb9bbb5c3e709af78690096b8950502244"} Nov 25 17:00:40 crc kubenswrapper[4812]: I1125 17:00:40.019083 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:00:40 crc kubenswrapper[4812]: I1125 17:00:40.050600 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" podStartSLOduration=1.496235217 podStartE2EDuration="5.050580645s" podCreationTimestamp="2025-11-25 17:00:35 +0000 UTC" firstStartedPulling="2025-11-25 17:00:36.038721506 +0000 UTC m=+810.878863601" lastFinishedPulling="2025-11-25 17:00:39.593066934 +0000 UTC m=+814.433209029" observedRunningTime="2025-11-25 17:00:40.048889064 +0000 UTC m=+814.889031169" watchObservedRunningTime="2025-11-25 17:00:40.050580645 +0000 UTC m=+814.890722740" Nov 25 17:00:40 crc kubenswrapper[4812]: I1125 17:00:40.066356 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8n7t6"] Nov 25 17:00:40 crc kubenswrapper[4812]: I1125 17:00:40.070040 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bgpwc" Nov 25 17:00:41 crc kubenswrapper[4812]: I1125 17:00:41.026659 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerID="7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b" exitCode=0 Nov 25 17:00:41 crc kubenswrapper[4812]: I1125 17:00:41.026769 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7t6" event={"ID":"2e8a6c88-4bd4-488f-821d-36c9cb50399a","Type":"ContainerDied","Data":"7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b"} Nov 25 17:00:41 crc kubenswrapper[4812]: I1125 17:00:41.027058 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7t6" event={"ID":"2e8a6c88-4bd4-488f-821d-36c9cb50399a","Type":"ContainerStarted","Data":"0588b959b7495ca1d8c0826ba2800724647c88ff8aab2b6c27f1136d4d5f5aef"} 
Nov 25 17:00:42 crc kubenswrapper[4812]: I1125 17:00:42.034701 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerID="bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674" exitCode=0
Nov 25 17:00:42 crc kubenswrapper[4812]: I1125 17:00:42.034830 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7t6" event={"ID":"2e8a6c88-4bd4-488f-821d-36c9cb50399a","Type":"ContainerDied","Data":"bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674"}
Nov 25 17:00:43 crc kubenswrapper[4812]: I1125 17:00:43.043838 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7t6" event={"ID":"2e8a6c88-4bd4-488f-821d-36c9cb50399a","Type":"ContainerStarted","Data":"8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2"}
Nov 25 17:00:43 crc kubenswrapper[4812]: I1125 17:00:43.061625 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8n7t6" podStartSLOduration=2.430784509 podStartE2EDuration="4.061603723s" podCreationTimestamp="2025-11-25 17:00:39 +0000 UTC" firstStartedPulling="2025-11-25 17:00:41.028284047 +0000 UTC m=+815.868426142" lastFinishedPulling="2025-11-25 17:00:42.659103261 +0000 UTC m=+817.499245356" observedRunningTime="2025-11-25 17:00:43.059154225 +0000 UTC m=+817.899296330" watchObservedRunningTime="2025-11-25 17:00:43.061603723 +0000 UTC m=+817.901745818"
Nov 25 17:00:43 crc kubenswrapper[4812]: I1125 17:00:43.509949 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpwc"]
Nov 25 17:00:43 crc kubenswrapper[4812]: I1125 17:00:43.510390 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bgpwc" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="registry-server" containerID="cri-o://9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2" gracePeriod=2
Nov 25 17:00:43 crc kubenswrapper[4812]: I1125 17:00:43.882805 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpwc"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.019453 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ftvg\" (UniqueName: \"kubernetes.io/projected/655c4c3b-beac-4aef-a366-556c01011bdc-kube-api-access-4ftvg\") pod \"655c4c3b-beac-4aef-a366-556c01011bdc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") "
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.019619 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-catalog-content\") pod \"655c4c3b-beac-4aef-a366-556c01011bdc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") "
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.019709 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-utilities\") pod \"655c4c3b-beac-4aef-a366-556c01011bdc\" (UID: \"655c4c3b-beac-4aef-a366-556c01011bdc\") "
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.020588 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-utilities" (OuterVolumeSpecName: "utilities") pod "655c4c3b-beac-4aef-a366-556c01011bdc" (UID: "655c4c3b-beac-4aef-a366-556c01011bdc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.025726 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/655c4c3b-beac-4aef-a366-556c01011bdc-kube-api-access-4ftvg" (OuterVolumeSpecName: "kube-api-access-4ftvg") pod "655c4c3b-beac-4aef-a366-556c01011bdc" (UID: "655c4c3b-beac-4aef-a366-556c01011bdc"). InnerVolumeSpecName "kube-api-access-4ftvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.035054 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "655c4c3b-beac-4aef-a366-556c01011bdc" (UID: "655c4c3b-beac-4aef-a366-556c01011bdc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.051628 4812 generic.go:334] "Generic (PLEG): container finished" podID="655c4c3b-beac-4aef-a366-556c01011bdc" containerID="9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2" exitCode=0
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.052248 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpwc"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.052356 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpwc" event={"ID":"655c4c3b-beac-4aef-a366-556c01011bdc","Type":"ContainerDied","Data":"9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2"}
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.052762 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpwc" event={"ID":"655c4c3b-beac-4aef-a366-556c01011bdc","Type":"ContainerDied","Data":"2cda7c46939bd972b6e8a8b38e79fab360d42880fd75ec25f9df5a5f479eddc6"}
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.052785 4812 scope.go:117] "RemoveContainer" containerID="9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.072116 4812 scope.go:117] "RemoveContainer" containerID="7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.083293 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpwc"]
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.088629 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpwc"]
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.105715 4812 scope.go:117] "RemoveContainer" containerID="fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.121883 4812 scope.go:117] "RemoveContainer" containerID="9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122186 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122235 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ftvg\" (UniqueName: \"kubernetes.io/projected/655c4c3b-beac-4aef-a366-556c01011bdc-kube-api-access-4ftvg\") on node \"crc\" DevicePath \"\""
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122252 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/655c4c3b-beac-4aef-a366-556c01011bdc-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:00:44 crc kubenswrapper[4812]: E1125 17:00:44.122232 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2\": container with ID starting with 9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2 not found: ID does not exist" containerID="9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122300 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2"} err="failed to get container status \"9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2\": rpc error: code = NotFound desc = could not find container \"9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2\": container with ID starting with 9873a8613916237d20347b291f77f875e59c34d25e92884ad78f3f45b708d0b2 not found: ID does not exist"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122339 4812 scope.go:117] "RemoveContainer" containerID="7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459"
Nov 25 17:00:44 crc kubenswrapper[4812]: E1125 17:00:44.122919 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459\": container with ID starting with 7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459 not found: ID does not exist" containerID="7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122960 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459"} err="failed to get container status \"7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459\": rpc error: code = NotFound desc = could not find container \"7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459\": container with ID starting with 7865e92300c0e136e0d0284142e12a24aed02ef7825e513227e2a36e7c2c5459 not found: ID does not exist"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.122990 4812 scope.go:117] "RemoveContainer" containerID="fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667"
Nov 25 17:00:44 crc kubenswrapper[4812]: E1125 17:00:44.123414 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667\": container with ID starting with fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667 not found: ID does not exist" containerID="fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667"
Nov 25 17:00:44 crc kubenswrapper[4812]: I1125 17:00:44.123437 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667"} err="failed to get container status \"fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667\": rpc error: code = NotFound desc = could not find container \"fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667\": container with ID starting with fb691f73575a3b50d9769e1d11933ecf58e6f8dd2df4246f8e1bb7c0b1cd4667 not found: ID does not exist"
Nov 25 17:00:45 crc kubenswrapper[4812]: I1125 17:00:45.634356 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x"
Nov 25 17:00:45 crc kubenswrapper[4812]: I1125 17:00:45.841598 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" path="/var/lib/kubelet/pods/655c4c3b-beac-4aef-a366-556c01011bdc/volumes"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.118173 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xm2ss"]
Nov 25 17:00:49 crc kubenswrapper[4812]: E1125 17:00:49.118833 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="extract-content"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.118851 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="extract-content"
Nov 25 17:00:49 crc kubenswrapper[4812]: E1125 17:00:49.118866 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="extract-utilities"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.118874 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="extract-utilities"
Nov 25 17:00:49 crc kubenswrapper[4812]: E1125 17:00:49.118885 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="registry-server"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.118893 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="registry-server"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.119022 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="655c4c3b-beac-4aef-a366-556c01011bdc" containerName="registry-server"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.120030 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.129121 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xm2ss"]
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.199398 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nccfs\" (UniqueName: \"kubernetes.io/projected/2867d1ef-c470-499f-993e-7c18782611b6-kube-api-access-nccfs\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.199494 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-utilities\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.199588 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-catalog-content\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.301607 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-catalog-content\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.301690 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nccfs\" (UniqueName: \"kubernetes.io/projected/2867d1ef-c470-499f-993e-7c18782611b6-kube-api-access-nccfs\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.301765 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-utilities\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.302244 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-catalog-content\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.302334 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-utilities\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.323663 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nccfs\" (UniqueName: \"kubernetes.io/projected/2867d1ef-c470-499f-993e-7c18782611b6-kube-api-access-nccfs\") pod \"redhat-operators-xm2ss\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") " pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.444191 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.648889 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8n7t6"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.649225 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8n7t6"
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.711971 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xm2ss"]
Nov 25 17:00:49 crc kubenswrapper[4812]: I1125 17:00:49.766305 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8n7t6"
Nov 25 17:00:50 crc kubenswrapper[4812]: I1125 17:00:50.102832 4812 generic.go:334] "Generic (PLEG): container finished" podID="2867d1ef-c470-499f-993e-7c18782611b6" containerID="679ca533e42cd418ea851f7c08ebc4fe59adc4394cac550ce98544182de3e83e" exitCode=0
Nov 25 17:00:50 crc kubenswrapper[4812]: I1125 17:00:50.103111 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerDied","Data":"679ca533e42cd418ea851f7c08ebc4fe59adc4394cac550ce98544182de3e83e"}
Nov 25 17:00:50 crc kubenswrapper[4812]: I1125 17:00:50.103166 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerStarted","Data":"7094ab3b1bd7497d04963a9e068f0b20a7dab44c27a42afae535140d6ed366df"}
Nov 25 17:00:50 crc kubenswrapper[4812]: I1125 17:00:50.147611 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8n7t6"
Nov 25 17:00:51 crc kubenswrapper[4812]: I1125 17:00:51.110439 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerStarted","Data":"c76ba04ff6eafd9b7d08a5d92bd2776430225f335c95870646c3fdff161a7419"}
Nov 25 17:00:52 crc kubenswrapper[4812]: I1125 17:00:52.117928 4812 generic.go:334] "Generic (PLEG): container finished" podID="2867d1ef-c470-499f-993e-7c18782611b6" containerID="c76ba04ff6eafd9b7d08a5d92bd2776430225f335c95870646c3fdff161a7419" exitCode=0
Nov 25 17:00:52 crc kubenswrapper[4812]: I1125 17:00:52.118029 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerDied","Data":"c76ba04ff6eafd9b7d08a5d92bd2776430225f335c95870646c3fdff161a7419"}
Nov 25 17:00:53 crc kubenswrapper[4812]: I1125 17:00:53.128329 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerStarted","Data":"a6b1b4eacb5ecd6983d576a32204c989fe4089c96bd54dbc7996c58a01ee3bd1"}
Nov 25 17:00:53 crc kubenswrapper[4812]: I1125 17:00:53.148415 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xm2ss" podStartSLOduration=1.718485909 podStartE2EDuration="4.148391857s" podCreationTimestamp="2025-11-25 17:00:49 +0000 UTC" firstStartedPulling="2025-11-25 17:00:50.105767538 +0000 UTC m=+824.945909623" lastFinishedPulling="2025-11-25 17:00:52.535673476 +0000 UTC m=+827.375815571" observedRunningTime="2025-11-25 17:00:53.144407422 +0000 UTC m=+827.984549527" watchObservedRunningTime="2025-11-25 17:00:53.148391857 +0000 UTC m=+827.988533952"
Nov 25 17:00:53 crc kubenswrapper[4812]: I1125 17:00:53.711578 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8n7t6"]
Nov 25 17:00:53 crc kubenswrapper[4812]: I1125 17:00:53.711892 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8n7t6" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="registry-server" containerID="cri-o://8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2" gracePeriod=2
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.081665 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8n7t6"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.136612 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerID="8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2" exitCode=0
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.136680 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8n7t6"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.136680 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7t6" event={"ID":"2e8a6c88-4bd4-488f-821d-36c9cb50399a","Type":"ContainerDied","Data":"8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2"}
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.136736 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7t6" event={"ID":"2e8a6c88-4bd4-488f-821d-36c9cb50399a","Type":"ContainerDied","Data":"0588b959b7495ca1d8c0826ba2800724647c88ff8aab2b6c27f1136d4d5f5aef"}
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.136763 4812 scope.go:117] "RemoveContainer" containerID="8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.156433 4812 scope.go:117] "RemoveContainer" containerID="bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.166957 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lhrk\" (UniqueName: \"kubernetes.io/projected/2e8a6c88-4bd4-488f-821d-36c9cb50399a-kube-api-access-7lhrk\") pod \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") "
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.167042 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-catalog-content\") pod \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") "
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.167137 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-utilities\") pod \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\" (UID: \"2e8a6c88-4bd4-488f-821d-36c9cb50399a\") "
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.168132 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-utilities" (OuterVolumeSpecName: "utilities") pod "2e8a6c88-4bd4-488f-821d-36c9cb50399a" (UID: "2e8a6c88-4bd4-488f-821d-36c9cb50399a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.172726 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e8a6c88-4bd4-488f-821d-36c9cb50399a-kube-api-access-7lhrk" (OuterVolumeSpecName: "kube-api-access-7lhrk") pod "2e8a6c88-4bd4-488f-821d-36c9cb50399a" (UID: "2e8a6c88-4bd4-488f-821d-36c9cb50399a"). InnerVolumeSpecName "kube-api-access-7lhrk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.176313 4812 scope.go:117] "RemoveContainer" containerID="7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.217903 4812 scope.go:117] "RemoveContainer" containerID="8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2"
Nov 25 17:00:54 crc kubenswrapper[4812]: E1125 17:00:54.218373 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2\": container with ID starting with 8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2 not found: ID does not exist" containerID="8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.218419 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2"} err="failed to get container status \"8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2\": rpc error: code = NotFound desc = could not find container \"8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2\": container with ID starting with 8c8f85c87dc7707f0cea5d9b2dd185ba3d9c84b3cc3a376b37b74c1c9ca4c4d2 not found: ID does not exist"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.218451 4812 scope.go:117] "RemoveContainer" containerID="bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674"
Nov 25 17:00:54 crc kubenswrapper[4812]: E1125 17:00:54.218771 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674\": container with ID starting with bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674 not found: ID does not exist" containerID="bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.218825 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674"} err="failed to get container status \"bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674\": rpc error: code = NotFound desc = could not find container \"bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674\": container with ID starting with bffdb5e8c5a4c11c11e7c2bbab6e7fb6726d3d87eac9b6654abcfd44d2b22674 not found: ID does not exist"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.218864 4812 scope.go:117] "RemoveContainer" containerID="7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b"
Nov 25 17:00:54 crc kubenswrapper[4812]: E1125 17:00:54.219139 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b\": container with ID starting with 7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b not found: ID does not exist" containerID="7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.219168 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b"} err="failed to get container status \"7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b\": rpc error: code = NotFound desc = could not find container \"7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b\": container with ID starting with 7da0523ac74faf02dd5b72089ca7dfcf61823dacbdee28f80f65fbbf66e1dd5b not found: ID does not exist"
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.225450 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e8a6c88-4bd4-488f-821d-36c9cb50399a" (UID: "2e8a6c88-4bd4-488f-821d-36c9cb50399a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.268796 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lhrk\" (UniqueName: \"kubernetes.io/projected/2e8a6c88-4bd4-488f-821d-36c9cb50399a-kube-api-access-7lhrk\") on node \"crc\" DevicePath \"\""
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.268831 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.268841 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e8a6c88-4bd4-488f-821d-36c9cb50399a-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.464233 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8n7t6"]
Nov 25 17:00:54 crc kubenswrapper[4812]: I1125 17:00:54.473821 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8n7t6"]
Nov 25 17:00:55 crc kubenswrapper[4812]: I1125 17:00:55.838282 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" path="/var/lib/kubelet/pods/2e8a6c88-4bd4-488f-821d-36c9cb50399a/volumes"
Nov 25 17:00:57 crc kubenswrapper[4812]: I1125 17:00:57.332671 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:00:57 crc kubenswrapper[4812]: I1125 17:00:57.333099 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:00:57 crc kubenswrapper[4812]: I1125 17:00:57.333172 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx"
Nov 25 17:00:57 crc kubenswrapper[4812]: I1125 17:00:57.333971 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0916093b8e73989d7d0a8f475c7e60ef04e5cae4ae347d150e81560d1068b4c0"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:00:57 crc kubenswrapper[4812]: I1125 17:00:57.334044 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://0916093b8e73989d7d0a8f475c7e60ef04e5cae4ae347d150e81560d1068b4c0" gracePeriod=600
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.171617 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="0916093b8e73989d7d0a8f475c7e60ef04e5cae4ae347d150e81560d1068b4c0" exitCode=0
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.171670 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"0916093b8e73989d7d0a8f475c7e60ef04e5cae4ae347d150e81560d1068b4c0"}
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.172272 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"1342f495637eb94354f1b480bb23cc055dabea0e94c3ee8c3777be3bb44ef47e"}
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.172305 4812 scope.go:117] "RemoveContainer" containerID="3b26e577296869e8ae3c303744ab388e7654f5620c398425aadc8669e43297d8"
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.444940 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.445138 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:00:59 crc kubenswrapper[4812]: I1125 17:00:59.490017 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:01:00 crc kubenswrapper[4812]: I1125 17:01:00.231016 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:01:00 crc kubenswrapper[4812]: I1125 17:01:00.275733 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xm2ss"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.191856 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xm2ss" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="registry-server" containerID="cri-o://a6b1b4eacb5ecd6983d576a32204c989fe4089c96bd54dbc7996c58a01ee3bd1" gracePeriod=2
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.289730 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"]
Nov 25 17:01:02 crc kubenswrapper[4812]: E1125 17:01:02.290279 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="extract-utilities"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.290297 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="extract-utilities"
Nov 25 17:01:02 crc kubenswrapper[4812]: E1125 17:01:02.290316 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="extract-content"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.290322 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="extract-content"
Nov 25 17:01:02 crc kubenswrapper[4812]: E1125 17:01:02.290337 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="registry-server"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.290343 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="registry-server"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.290452 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e8a6c88-4bd4-488f-821d-36c9cb50399a" containerName="registry-server"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.291083 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.293600 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-z94w7"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.310214 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.311674 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.313828 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-h428w"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.325220 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.331707 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.333177 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.335513 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-sl66b"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.343683 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.359442 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.364551 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.377724 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-pft6m"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.392824 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd5fv\" (UniqueName: \"kubernetes.io/projected/bac38f31-ec39-46b9-9bac-2920864fb8a2-kube-api-access-gd5fv\") pod \"barbican-operator-controller-manager-86dc4d89c8-clwgz\" (UID: \"bac38f31-ec39-46b9-9bac-2920864fb8a2\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.429044 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.430558 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.437058 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-gr9tw"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.449400 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.470064 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.471445 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.476661 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-4dkvl"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.494198 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.497515 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcjpd\" (UniqueName: \"kubernetes.io/projected/d53b5c25-d66b-46c5-80a5-998eb9007598-kube-api-access-gcjpd\") pod \"glance-operator-controller-manager-69b8c86946-ww5m5\" (UID: \"d53b5c25-d66b-46c5-80a5-998eb9007598\") " pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.497604 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbkcn\" (UniqueName: \"kubernetes.io/projected/2374c36a-5118-4a90-985c-1f80597d73af-kube-api-access-bbkcn\") pod \"designate-operator-controller-manager-7d695c9b56-7dd4g\" (UID: \"2374c36a-5118-4a90-985c-1f80597d73af\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.497657 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd5fv\" (UniqueName: \"kubernetes.io/projected/bac38f31-ec39-46b9-9bac-2920864fb8a2-kube-api-access-gd5fv\") pod \"barbican-operator-controller-manager-86dc4d89c8-clwgz\" (UID: \"bac38f31-ec39-46b9-9bac-2920864fb8a2\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.497692 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-929ps\" (UniqueName: \"kubernetes.io/projected/36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b-kube-api-access-929ps\") pod \"cinder-operator-controller-manager-79856dc55c-jnszn\" (UID: \"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.508350 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.526616 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.534937 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd5fv\" (UniqueName: \"kubernetes.io/projected/bac38f31-ec39-46b9-9bac-2920864fb8a2-kube-api-access-gd5fv\") pod \"barbican-operator-controller-manager-86dc4d89c8-clwgz\" (UID: \"bac38f31-ec39-46b9-9bac-2920864fb8a2\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.545668 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.546804 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.550297 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-lp5kf"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.551030 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.553122 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.561002 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.562567 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.564549 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rs74b"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.575134 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.577120 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.582053 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-k95ld"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.594345 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.600268 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5x5g\" (UniqueName: \"kubernetes.io/projected/7f72311f-8622-43f6-b499-8b52318b0e2a-kube-api-access-j5x5g\") pod \"heat-operator-controller-manager-774b86978c-zzhb4\" (UID: \"7f72311f-8622-43f6-b499-8b52318b0e2a\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.600547 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcjpd\" (UniqueName: \"kubernetes.io/projected/d53b5c25-d66b-46c5-80a5-998eb9007598-kube-api-access-gcjpd\") pod \"glance-operator-controller-manager-69b8c86946-ww5m5\" (UID: \"d53b5c25-d66b-46c5-80a5-998eb9007598\") " pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.600658 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbkcn\" (UniqueName: \"kubernetes.io/projected/2374c36a-5118-4a90-985c-1f80597d73af-kube-api-access-bbkcn\") pod \"designate-operator-controller-manager-7d695c9b56-7dd4g\" (UID: \"2374c36a-5118-4a90-985c-1f80597d73af\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.600854 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-929ps\" (UniqueName: \"kubernetes.io/projected/36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b-kube-api-access-929ps\") pod \"cinder-operator-controller-manager-79856dc55c-jnszn\" (UID: \"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.600945 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9jz6\" (UniqueName: \"kubernetes.io/projected/39d8b8c1-7015-487a-9263-25531a65c48c-kube-api-access-v9jz6\") pod \"horizon-operator-controller-manager-68c9694994-jth28\" (UID: \"39d8b8c1-7015-487a-9263-25531a65c48c\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.604428 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.611183 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.612724 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.614641 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.618284 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-wghrc"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.635248 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.636724 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.646704 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-929ps\" (UniqueName: \"kubernetes.io/projected/36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b-kube-api-access-929ps\") pod \"cinder-operator-controller-manager-79856dc55c-jnszn\" (UID: \"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.647263 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-v44fh"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.648298 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.654076 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.659092 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbkcn\" (UniqueName: \"kubernetes.io/projected/2374c36a-5118-4a90-985c-1f80597d73af-kube-api-access-bbkcn\") pod \"designate-operator-controller-manager-7d695c9b56-7dd4g\" (UID: \"2374c36a-5118-4a90-985c-1f80597d73af\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.659706 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcjpd\" (UniqueName: \"kubernetes.io/projected/d53b5c25-d66b-46c5-80a5-998eb9007598-kube-api-access-gcjpd\") pod \"glance-operator-controller-manager-69b8c86946-ww5m5\" (UID: \"d53b5c25-d66b-46c5-80a5-998eb9007598\") " pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.659749 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.663939 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.671944 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.673237 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.678612 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-4zdz8"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.689396 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.690062 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-llvr2"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.693233 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.702302 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5x5g\" (UniqueName: \"kubernetes.io/projected/7f72311f-8622-43f6-b499-8b52318b0e2a-kube-api-access-j5x5g\") pod \"heat-operator-controller-manager-774b86978c-zzhb4\" (UID: \"7f72311f-8622-43f6-b499-8b52318b0e2a\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.702364 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzrzc\" (UniqueName: \"kubernetes.io/projected/48707b31-d8f9-4a7e-a8b9-2728249f0a49-kube-api-access-hzrzc\") pod \"ironic-operator-controller-manager-5bfcdc958c-pljcc\" (UID: \"48707b31-d8f9-4a7e-a8b9-2728249f0a49\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.702431 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16fc0b64-6599-4b8b-a0b7-b609dab9dd31-cert\") pod \"infra-operator-controller-manager-858778c9dc-ncdvr\" (UID: \"16fc0b64-6599-4b8b-a0b7-b609dab9dd31\") " pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.702461 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxzxz\" (UniqueName: \"kubernetes.io/projected/16fc0b64-6599-4b8b-a0b7-b609dab9dd31-kube-api-access-mxzxz\") pod \"infra-operator-controller-manager-858778c9dc-ncdvr\" (UID: \"16fc0b64-6599-4b8b-a0b7-b609dab9dd31\") " pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.702507 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9jz6\" (UniqueName: \"kubernetes.io/projected/39d8b8c1-7015-487a-9263-25531a65c48c-kube-api-access-v9jz6\") pod \"horizon-operator-controller-manager-68c9694994-jth28\" (UID: \"39d8b8c1-7015-487a-9263-25531a65c48c\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.702575 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2qzt\" (UniqueName: \"kubernetes.io/projected/4c649e41-10e8-4eee-bfc0-bf1a9409e421-kube-api-access-n2qzt\") pod \"keystone-operator-controller-manager-748dc6576f-6jwl5\" (UID: \"4c649e41-10e8-4eee-bfc0-bf1a9409e421\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.708584 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.717066 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.718273 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.729321 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-b29xg"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.731975 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.738488 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.741666 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9jz6\" (UniqueName: \"kubernetes.io/projected/39d8b8c1-7015-487a-9263-25531a65c48c-kube-api-access-v9jz6\") pod \"horizon-operator-controller-manager-68c9694994-jth28\" (UID: \"39d8b8c1-7015-487a-9263-25531a65c48c\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.745562 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5x5g\" (UniqueName: \"kubernetes.io/projected/7f72311f-8622-43f6-b499-8b52318b0e2a-kube-api-access-j5x5g\") pod \"heat-operator-controller-manager-774b86978c-zzhb4\" (UID: \"7f72311f-8622-43f6-b499-8b52318b0e2a\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.765634 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.769223 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.780833 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.781970 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.784739 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-dvhx5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.791013 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.792331 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.795368 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.795866 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-x26zf"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.800614 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803407 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b8fl\" (UniqueName: \"kubernetes.io/projected/d6f00506-8ef7-46ec-9492-01e0005f90d3-kube-api-access-6b8fl\") pod \"neutron-operator-controller-manager-7c57c8bbc4-vncgw\" (UID: \"d6f00506-8ef7-46ec-9492-01e0005f90d3\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803484 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16fc0b64-6599-4b8b-a0b7-b609dab9dd31-cert\") pod \"infra-operator-controller-manager-858778c9dc-ncdvr\" (UID: \"16fc0b64-6599-4b8b-a0b7-b609dab9dd31\") " pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803509 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6bkw\" (UniqueName: \"kubernetes.io/projected/18418f15-9ec8-48df-a761-118f45058d06-kube-api-access-k6bkw\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-qn86p\" (UID: \"18418f15-9ec8-48df-a761-118f45058d06\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803527 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxzxz\" (UniqueName: \"kubernetes.io/projected/16fc0b64-6599-4b8b-a0b7-b609dab9dd31-kube-api-access-mxzxz\") pod \"infra-operator-controller-manager-858778c9dc-ncdvr\" (UID: \"16fc0b64-6599-4b8b-a0b7-b609dab9dd31\") " pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803574 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mxp9\" (UniqueName: \"kubernetes.io/projected/4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf-kube-api-access-5mxp9\") pod \"manila-operator-controller-manager-58bb8d67cc-vnjdk\" (UID: \"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803629 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2qzt\" (UniqueName: \"kubernetes.io/projected/4c649e41-10e8-4eee-bfc0-bf1a9409e421-kube-api-access-n2qzt\") pod \"keystone-operator-controller-manager-748dc6576f-6jwl5\" (UID: \"4c649e41-10e8-4eee-bfc0-bf1a9409e421\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803653 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6shb\" (UniqueName: \"kubernetes.io/projected/ab19d0cd-1e29-41af-892c-8f25f12b7f1c-kube-api-access-d6shb\") pod \"nova-operator-controller-manager-79556f57fc-bsfwl\" (UID: \"ab19d0cd-1e29-41af-892c-8f25f12b7f1c\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.803678 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzrzc\" (UniqueName: \"kubernetes.io/projected/48707b31-d8f9-4a7e-a8b9-2728249f0a49-kube-api-access-hzrzc\") pod \"ironic-operator-controller-manager-5bfcdc958c-pljcc\" (UID: \"48707b31-d8f9-4a7e-a8b9-2728249f0a49\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.805471 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.807596 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.810137 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16fc0b64-6599-4b8b-a0b7-b609dab9dd31-cert\") pod \"infra-operator-controller-manager-858778c9dc-ncdvr\" (UID: \"16fc0b64-6599-4b8b-a0b7-b609dab9dd31\") " pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.816279 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-gdlbb"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.818090 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.833841 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.834506 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzrzc\" (UniqueName: \"kubernetes.io/projected/48707b31-d8f9-4a7e-a8b9-2728249f0a49-kube-api-access-hzrzc\") pod \"ironic-operator-controller-manager-5bfcdc958c-pljcc\" (UID: \"48707b31-d8f9-4a7e-a8b9-2728249f0a49\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.840982 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2qzt\" (UniqueName: \"kubernetes.io/projected/4c649e41-10e8-4eee-bfc0-bf1a9409e421-kube-api-access-n2qzt\") pod \"keystone-operator-controller-manager-748dc6576f-6jwl5\" (UID: \"4c649e41-10e8-4eee-bfc0-bf1a9409e421\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.841245 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxzxz\" (UniqueName: \"kubernetes.io/projected/16fc0b64-6599-4b8b-a0b7-b609dab9dd31-kube-api-access-mxzxz\") pod \"infra-operator-controller-manager-858778c9dc-ncdvr\" (UID: \"16fc0b64-6599-4b8b-a0b7-b609dab9dd31\") " pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.873795 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg"]
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.875502 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.900174 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912061 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b8fl\" (UniqueName: \"kubernetes.io/projected/d6f00506-8ef7-46ec-9492-01e0005f90d3-kube-api-access-6b8fl\") pod \"neutron-operator-controller-manager-7c57c8bbc4-vncgw\" (UID: \"d6f00506-8ef7-46ec-9492-01e0005f90d3\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912134 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr8g7\" (UniqueName: \"kubernetes.io/projected/bb01ec67-804d-4800-9ab4-e607563017b2-kube-api-access-xr8g7\") pod \"placement-operator-controller-manager-5db546f9d9-kx2fg\" (UID: \"bb01ec67-804d-4800-9ab4-e607563017b2\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912178 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6bkw\" (UniqueName: \"kubernetes.io/projected/18418f15-9ec8-48df-a761-118f45058d06-kube-api-access-k6bkw\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-qn86p\" (UID: \"18418f15-9ec8-48df-a761-118f45058d06\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912285 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912397 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mxp9\" (UniqueName: \"kubernetes.io/projected/4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf-kube-api-access-5mxp9\") pod \"manila-operator-controller-manager-58bb8d67cc-vnjdk\" (UID: \"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk"
Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912498 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6qlt\" (UniqueName: \"kubernetes.io/projected/15120e64-d800-43d8-b8c3-673e5854baef-kube-api-access-d6qlt\") pod \"octavia-operator-controller-manager-fd75fd47d-6kqrr\" (UID: \"15120e64-d800-43d8-b8c3-673e5854baef\") "
pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.912844 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hwjx\" (UniqueName: \"kubernetes.io/projected/1e943cab-36af-421d-b7a4-24010912da99-kube-api-access-7hwjx\") pod \"ovn-operator-controller-manager-66cf5c67ff-mrlbz\" (UID: \"1e943cab-36af-421d-b7a4-24010912da99\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.913157 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6shb\" (UniqueName: \"kubernetes.io/projected/ab19d0cd-1e29-41af-892c-8f25f12b7f1c-kube-api-access-d6shb\") pod \"nova-operator-controller-manager-79556f57fc-bsfwl\" (UID: \"ab19d0cd-1e29-41af-892c-8f25f12b7f1c\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.913240 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b4vg\" (UniqueName: \"kubernetes.io/projected/54850143-f77e-4d59-bcc4-c5bd3bc85880-kube-api-access-7b4vg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.940671 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.951708 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8"] Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.954281 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mxp9\" (UniqueName: \"kubernetes.io/projected/4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf-kube-api-access-5mxp9\") pod \"manila-operator-controller-manager-58bb8d67cc-vnjdk\" (UID: \"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.954790 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6bkw\" (UniqueName: \"kubernetes.io/projected/18418f15-9ec8-48df-a761-118f45058d06-kube-api-access-k6bkw\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-qn86p\" (UID: \"18418f15-9ec8-48df-a761-118f45058d06\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.956693 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6shb\" (UniqueName: \"kubernetes.io/projected/ab19d0cd-1e29-41af-892c-8f25f12b7f1c-kube-api-access-d6shb\") pod \"nova-operator-controller-manager-79556f57fc-bsfwl\" (UID: \"ab19d0cd-1e29-41af-892c-8f25f12b7f1c\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.958333 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.963738 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-qknbl" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.964105 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b8fl\" (UniqueName: \"kubernetes.io/projected/d6f00506-8ef7-46ec-9492-01e0005f90d3-kube-api-access-6b8fl\") pod \"neutron-operator-controller-manager-7c57c8bbc4-vncgw\" (UID: \"d6f00506-8ef7-46ec-9492-01e0005f90d3\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.984850 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r"] Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.986984 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:02 crc kubenswrapper[4812]: I1125 17:01:02.994476 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-8vt9v" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:02.998627 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.012453 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.013075 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.018045 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hwjx\" (UniqueName: \"kubernetes.io/projected/1e943cab-36af-421d-b7a4-24010912da99-kube-api-access-7hwjx\") pod \"ovn-operator-controller-manager-66cf5c67ff-mrlbz\" (UID: \"1e943cab-36af-421d-b7a4-24010912da99\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.018155 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b4vg\" (UniqueName: \"kubernetes.io/projected/54850143-f77e-4d59-bcc4-c5bd3bc85880-kube-api-access-7b4vg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.018937 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr8g7\" (UniqueName: \"kubernetes.io/projected/bb01ec67-804d-4800-9ab4-e607563017b2-kube-api-access-xr8g7\") pod \"placement-operator-controller-manager-5db546f9d9-kx2fg\" (UID: \"bb01ec67-804d-4800-9ab4-e607563017b2\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.018979 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" 
(UniqueName: \"kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.019029 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6qlt\" (UniqueName: \"kubernetes.io/projected/15120e64-d800-43d8-b8c3-673e5854baef-kube-api-access-d6qlt\") pod \"octavia-operator-controller-manager-fd75fd47d-6kqrr\" (UID: \"15120e64-d800-43d8-b8c3-673e5854baef\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.021809 4812 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.021873 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert podName:54850143-f77e-4d59-bcc4-c5bd3bc85880 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:03.521853583 +0000 UTC m=+838.361995678 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" (UID: "54850143-f77e-4d59-bcc4-c5bd3bc85880") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.050374 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.057861 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr8g7\" (UniqueName: \"kubernetes.io/projected/bb01ec67-804d-4800-9ab4-e607563017b2-kube-api-access-xr8g7\") pod \"placement-operator-controller-manager-5db546f9d9-kx2fg\" (UID: \"bb01ec67-804d-4800-9ab4-e607563017b2\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.054344 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6qlt\" (UniqueName: \"kubernetes.io/projected/15120e64-d800-43d8-b8c3-673e5854baef-kube-api-access-d6qlt\") pod \"octavia-operator-controller-manager-fd75fd47d-6kqrr\" (UID: \"15120e64-d800-43d8-b8c3-673e5854baef\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.070310 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.074205 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b4vg\" (UniqueName: \"kubernetes.io/projected/54850143-f77e-4d59-bcc4-c5bd3bc85880-kube-api-access-7b4vg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.080855 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.091408 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hwjx\" (UniqueName: \"kubernetes.io/projected/1e943cab-36af-421d-b7a4-24010912da99-kube-api-access-7hwjx\") pod \"ovn-operator-controller-manager-66cf5c67ff-mrlbz\" (UID: \"1e943cab-36af-421d-b7a4-24010912da99\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.101625 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.103105 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.104084 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.108072 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.110738 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-shqwq" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.120626 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glhd8\" (UniqueName: \"kubernetes.io/projected/10dee73c-a6d8-429d-b5c0-9226eec6d1f3-kube-api-access-glhd8\") pod \"swift-operator-controller-manager-6fdc4fcf86-2gwk8\" (UID: \"10dee73c-a6d8-429d-b5c0-9226eec6d1f3\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.120920 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fhjg\" (UniqueName: \"kubernetes.io/projected/07907797-7edd-48e0-bb69-e42ad740f173-kube-api-access-4fhjg\") pod \"telemetry-operator-controller-manager-567f98c9d-h9l5r\" (UID: \"07907797-7edd-48e0-bb69-e42ad740f173\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.131111 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.141764 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-vzgsw"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.145450 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.148843 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-drzcc" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.160664 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-vzgsw"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.195322 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.207357 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.213634 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.217954 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.218599 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-5k96m" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.218747 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.239846 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glhd8\" (UniqueName: \"kubernetes.io/projected/10dee73c-a6d8-429d-b5c0-9226eec6d1f3-kube-api-access-glhd8\") pod \"swift-operator-controller-manager-6fdc4fcf86-2gwk8\" (UID: \"10dee73c-a6d8-429d-b5c0-9226eec6d1f3\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.239993 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fhjg\" (UniqueName: \"kubernetes.io/projected/07907797-7edd-48e0-bb69-e42ad740f173-kube-api-access-4fhjg\") pod \"telemetry-operator-controller-manager-567f98c9d-h9l5r\" (UID: \"07907797-7edd-48e0-bb69-e42ad740f173\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.240053 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swl26\" (UniqueName: \"kubernetes.io/projected/98133284-26db-4073-a43c-f9572476153c-kube-api-access-swl26\") pod \"watcher-operator-controller-manager-864885998-vzgsw\" (UID: \"98133284-26db-4073-a43c-f9572476153c\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.240089 4812 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvt6t\" (UniqueName: \"kubernetes.io/projected/13981b57-58d8-42f8-a833-f9760f06df20-kube-api-access-hvt6t\") pod \"test-operator-controller-manager-5cb74df96-jnhr9\" (UID: \"13981b57-58d8-42f8-a833-f9760f06df20\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.240269 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.279519 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glhd8\" (UniqueName: \"kubernetes.io/projected/10dee73c-a6d8-429d-b5c0-9226eec6d1f3-kube-api-access-glhd8\") pod \"swift-operator-controller-manager-6fdc4fcf86-2gwk8\" (UID: \"10dee73c-a6d8-429d-b5c0-9226eec6d1f3\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.285320 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fhjg\" (UniqueName: \"kubernetes.io/projected/07907797-7edd-48e0-bb69-e42ad740f173-kube-api-access-4fhjg\") pod \"telemetry-operator-controller-manager-567f98c9d-h9l5r\" (UID: \"07907797-7edd-48e0-bb69-e42ad740f173\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.287407 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.288444 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.297229 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.311988 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.312000 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-jlhx7" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.329866 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.374324 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swl26\" (UniqueName: \"kubernetes.io/projected/98133284-26db-4073-a43c-f9572476153c-kube-api-access-swl26\") pod \"watcher-operator-controller-manager-864885998-vzgsw\" (UID: \"98133284-26db-4073-a43c-f9572476153c\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.374369 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvt6t\" (UniqueName: \"kubernetes.io/projected/13981b57-58d8-42f8-a833-f9760f06df20-kube-api-access-hvt6t\") pod \"test-operator-controller-manager-5cb74df96-jnhr9\" (UID: \"13981b57-58d8-42f8-a833-f9760f06df20\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.374410 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.374468 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsbwn\" (UniqueName: \"kubernetes.io/projected/b8169d76-3e78-4510-aa23-e8d733d495a2-kube-api-access-lsbwn\") pod \"rabbitmq-cluster-operator-manager-668c99d594-sb4m4\" (UID: \"b8169d76-3e78-4510-aa23-e8d733d495a2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.374500 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2592\" (UniqueName: \"kubernetes.io/projected/f3785053-5fa1-43b6-86f7-0182a1a49946-kube-api-access-f2592\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.374613 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.412975 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvt6t\" (UniqueName: \"kubernetes.io/projected/13981b57-58d8-42f8-a833-f9760f06df20-kube-api-access-hvt6t\") pod \"test-operator-controller-manager-5cb74df96-jnhr9\" (UID: \"13981b57-58d8-42f8-a833-f9760f06df20\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.437251 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swl26\" (UniqueName: 
\"kubernetes.io/projected/98133284-26db-4073-a43c-f9572476153c-kube-api-access-swl26\") pod \"watcher-operator-controller-manager-864885998-vzgsw\" (UID: \"98133284-26db-4073-a43c-f9572476153c\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.442108 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.479485 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.479588 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsbwn\" (UniqueName: \"kubernetes.io/projected/b8169d76-3e78-4510-aa23-e8d733d495a2-kube-api-access-lsbwn\") pod \"rabbitmq-cluster-operator-manager-668c99d594-sb4m4\" (UID: \"b8169d76-3e78-4510-aa23-e8d733d495a2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.479626 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2592\" (UniqueName: \"kubernetes.io/projected/f3785053-5fa1-43b6-86f7-0182a1a49946-kube-api-access-f2592\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.479714 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.479931 4812 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.480006 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs podName:f3785053-5fa1-43b6-86f7-0182a1a49946 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:03.979981379 +0000 UTC m=+838.820123474 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs") pod "openstack-operator-controller-manager-585789bb75-mft5q" (UID: "f3785053-5fa1-43b6-86f7-0182a1a49946") : secret "metrics-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.480512 4812 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.480564 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs podName:f3785053-5fa1-43b6-86f7-0182a1a49946 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:03.980554345 +0000 UTC m=+838.820696440 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs") pod "openstack-operator-controller-manager-585789bb75-mft5q" (UID: "f3785053-5fa1-43b6-86f7-0182a1a49946") : secret "webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.506560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2592\" (UniqueName: \"kubernetes.io/projected/f3785053-5fa1-43b6-86f7-0182a1a49946-kube-api-access-f2592\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.512736 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsbwn\" (UniqueName: \"kubernetes.io/projected/b8169d76-3e78-4510-aa23-e8d733d495a2-kube-api-access-lsbwn\") pod \"rabbitmq-cluster-operator-manager-668c99d594-sb4m4\" (UID: \"b8169d76-3e78-4510-aa23-e8d733d495a2\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.522713 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.547213 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.581431 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.581728 4812 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.581844 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert podName:54850143-f77e-4d59-bcc4-c5bd3bc85880 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:04.581770557 +0000 UTC m=+839.421912652 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" (UID: "54850143-f77e-4d59-bcc4-c5bd3bc85880") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.691608 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.708680 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.738996 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn"] Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.990641 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: I1125 17:01:03.990824 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.990874 4812 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.990947 4812 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.990971 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs podName:f3785053-5fa1-43b6-86f7-0182a1a49946 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:04.990945462 +0000 UTC m=+839.831087557 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs") pod "openstack-operator-controller-manager-585789bb75-mft5q" (UID: "f3785053-5fa1-43b6-86f7-0182a1a49946") : secret "webhook-server-cert" not found Nov 25 17:01:03 crc kubenswrapper[4812]: E1125 17:01:03.990989 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs podName:f3785053-5fa1-43b6-86f7-0182a1a49946 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:04.990981723 +0000 UTC m=+839.831123818 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs") pod "openstack-operator-controller-manager-585789bb75-mft5q" (UID: "f3785053-5fa1-43b6-86f7-0182a1a49946") : secret "metrics-server-cert" not found Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.199017 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.206228 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jth28"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.213808 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.219309 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.223790 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.257833 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" event={"ID":"16fc0b64-6599-4b8b-a0b7-b609dab9dd31","Type":"ContainerStarted","Data":"28ad733dff114ea34f606ebc51772132340069036927fdc15d8d959d9a0cca07"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.259506 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" event={"ID":"7f72311f-8622-43f6-b499-8b52318b0e2a","Type":"ContainerStarted","Data":"a5aa047b7888637c568896879faa9f389503cf048a29321cd9f2653c169b753d"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.260648 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" event={"ID":"d53b5c25-d66b-46c5-80a5-998eb9007598","Type":"ContainerStarted","Data":"a828930ad67e47eb95563526bfabe811ef67f74d7b85547d9a0d8126a7819294"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.261634 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" event={"ID":"2374c36a-5118-4a90-985c-1f80597d73af","Type":"ContainerStarted","Data":"901d1f538e285f6b9997ba8cee20bcc28d88dda5726cbcfc51c74a9863c278fa"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.262687 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" event={"ID":"48707b31-d8f9-4a7e-a8b9-2728249f0a49","Type":"ContainerStarted","Data":"e74776f486c82ceba746c0ffef7bcf63546c67df3aabc6646f36d4e1511eb22a"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.266893 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" event={"ID":"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b","Type":"ContainerStarted","Data":"8daa8cd01d3344da184ac3deaea4ed680a42432f348b5b50da52d881d11335bf"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.270270 4812 generic.go:334] "Generic (PLEG): container finished" podID="2867d1ef-c470-499f-993e-7c18782611b6" 
containerID="a6b1b4eacb5ecd6983d576a32204c989fe4089c96bd54dbc7996c58a01ee3bd1" exitCode=0 Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.270381 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerDied","Data":"a6b1b4eacb5ecd6983d576a32204c989fe4089c96bd54dbc7996c58a01ee3bd1"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.271754 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerStarted","Data":"83aa10190ff7b07908c400d7507d60d20c4d8388ca6911c20ba756ad9447168c"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.272812 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" event={"ID":"39d8b8c1-7015-487a-9263-25531a65c48c","Type":"ContainerStarted","Data":"0d083ceae65fcd06a2123323c2c41faf80ddbee45711379c86019d6ad67a6b48"} Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.603892 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.612722 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.620173 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54850143-f77e-4d59-bcc4-c5bd3bc85880-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9\" (UID: \"54850143-f77e-4d59-bcc4-c5bd3bc85880\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.631413 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.644302 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.649011 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk"] Nov 25 17:01:04 crc kubenswrapper[4812]: W1125 17:01:04.655020 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10dee73c_a6d8_429d_b5c0_9226eec6d1f3.slice/crio-433d1adfd58eedee164985ab86686336f0ca67544ad99d2d51766238f7d1cb26 WatchSource:0}: Error finding container 433d1adfd58eedee164985ab86686336f0ca67544ad99d2d51766238f7d1cb26: Status 404 returned error can't find the container with id 433d1adfd58eedee164985ab86686336f0ca67544ad99d2d51766238f7d1cb26 Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.659159 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 
17:01:04.664742 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.667409 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.671359 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.680422 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.698829 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.723183 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-vzgsw"] Nov 25 17:01:04 crc kubenswrapper[4812]: W1125 17:01:04.730464 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4709cc72_b3b5_4bf6_ac84_45cc1f6bc2bf.slice/crio-d0cc1a366d0a77ad0f9341d30f1bbda76e18d5576decbc1d13c610d6a785caa1 WatchSource:0}: Error finding container d0cc1a366d0a77ad0f9341d30f1bbda76e18d5576decbc1d13c610d6a785caa1: Status 404 returned error can't find the container with id d0cc1a366d0a77ad0f9341d30f1bbda76e18d5576decbc1d13c610d6a785caa1 Nov 25 17:01:04 crc kubenswrapper[4812]: W1125 17:01:04.733612 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e943cab_36af_421d_b7a4_24010912da99.slice/crio-3d34754896cff71189e9d12bb7c0f16fbef26d8feb016215424aa63bb23bd8e9 WatchSource:0}: Error finding container 3d34754896cff71189e9d12bb7c0f16fbef26d8feb016215424aa63bb23bd8e9: Status 404 returned error can't find the container with id 3d34754896cff71189e9d12bb7c0f16fbef26d8feb016215424aa63bb23bd8e9 Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.748719 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5mxp9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58bb8d67cc-vnjdk_openstack-operators(4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.748853 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k6bkw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-cb6c4fdb7-qn86p_openstack-operators(18418f15-9ec8-48df-a761-118f45058d06): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.753512 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xm2ss" Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.777720 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.781613 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4"] Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.804452 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr"] Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.814299 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5mxp9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58bb8d67cc-vnjdk_openstack-operators(4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.814395 4812 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7hwjx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-mrlbz_openstack-operators(1e943cab-36af-421d-b7a4-24010912da99): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.815652 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" podUID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.819275 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7hwjx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-mrlbz_openstack-operators(1e943cab-36af-421d-b7a4-24010912da99): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.820658 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" podUID="1e943cab-36af-421d-b7a4-24010912da99"
Nov 25 17:01:04 crc kubenswrapper[4812]: W1125 17:01:04.822896 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb01ec67_804d_4800_9ab4_e607563017b2.slice/crio-349ecff0c106723132484e462bc577417be23457ca2a147f86a7d52fb34d8445 WatchSource:0}: Error finding container 349ecff0c106723132484e462bc577417be23457ca2a147f86a7d52fb34d8445: Status 404 returned error can't find the container with id 349ecff0c106723132484e462bc577417be23457ca2a147f86a7d52fb34d8445
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.836918 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xr8g7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-kx2fg_openstack-operators(bb01ec67-804d-4800-9ab4-e607563017b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.838274 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lsbwn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-sb4m4_openstack-operators(b8169d76-3e78-4510-aa23-e8d733d495a2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.838309 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d6qlt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-6kqrr_openstack-operators(15120e64-d800-43d8-b8c3-673e5854baef): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.839693 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xr8g7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-kx2fg_openstack-operators(bb01ec67-804d-4800-9ab4-e607563017b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.839908 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" podUID="b8169d76-3e78-4510-aa23-e8d733d495a2"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.840405 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d6qlt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-6kqrr_openstack-operators(15120e64-d800-43d8-b8c3-673e5854baef): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.841730 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" podUID="15120e64-d800-43d8-b8c3-673e5854baef"
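
The burst of ErrImagePull: "pull QPS exceeded" failures above is kubelet-side throttling, not a registry error: the kubelet rate-limits image pulls per node, and a node cold-starting this many operator pods at once exhausts the default budget of 5 pulls/s (burst 10). The limit is tunable through the node's KubeletConfiguration; a minimal sketch with illustrative values (registryPullQPS and registryBurst are the actual kubelet fields; the numbers chosen here are assumptions, not a recommendation):

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    registryPullQPS: 20   # illustrative; default is 5, and 0 disables the limit
    registryBurst: 40     # illustrative; default is 10, only used when registryPullQPS > 0

As the entries that follow show, the condition is transient: the affected pods first sit in ImagePullBackOff, then report ContainerStarted and pass their readiness probes once the limiter admits their pulls.
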
Nov 25 17:01:04 crc kubenswrapper[4812]: E1125 17:01:04.841792 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2"
Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.910584 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nccfs\" (UniqueName: \"kubernetes.io/projected/2867d1ef-c470-499f-993e-7c18782611b6-kube-api-access-nccfs\") pod \"2867d1ef-c470-499f-993e-7c18782611b6\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") "
Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.910757 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-catalog-content\") pod \"2867d1ef-c470-499f-993e-7c18782611b6\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") "
Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.910826 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-utilities\") pod \"2867d1ef-c470-499f-993e-7c18782611b6\" (UID: \"2867d1ef-c470-499f-993e-7c18782611b6\") "
Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.912930 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-utilities" (OuterVolumeSpecName: "utilities") pod "2867d1ef-c470-499f-993e-7c18782611b6" (UID: "2867d1ef-c470-499f-993e-7c18782611b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:01:04 crc kubenswrapper[4812]: I1125 17:01:04.923040 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2867d1ef-c470-499f-993e-7c18782611b6-kube-api-access-nccfs" (OuterVolumeSpecName: "kube-api-access-nccfs") pod "2867d1ef-c470-499f-993e-7c18782611b6" (UID: "2867d1ef-c470-499f-993e-7c18782611b6"). InnerVolumeSpecName "kube-api-access-nccfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:04.998892 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2867d1ef-c470-499f-993e-7c18782611b6" (UID: "2867d1ef-c470-499f-993e-7c18782611b6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.015367 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.015752 4812 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.015846 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs podName:f3785053-5fa1-43b6-86f7-0182a1a49946 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:07.015818837 +0000 UTC m=+841.855960932 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs") pod "openstack-operator-controller-manager-585789bb75-mft5q" (UID: "f3785053-5fa1-43b6-86f7-0182a1a49946") : secret "metrics-server-cert" not found
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.017028 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.017362 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.017380 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2867d1ef-c470-499f-993e-7c18782611b6-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.017393 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nccfs\" (UniqueName: \"kubernetes.io/projected/2867d1ef-c470-499f-993e-7c18782611b6-kube-api-access-nccfs\") on node \"crc\" DevicePath \"\""
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.018046 4812 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.018084 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs podName:f3785053-5fa1-43b6-86f7-0182a1a49946 nodeName:}" failed. No retries permitted until 2025-11-25 17:01:07.018071098 +0000 UTC m=+841.858213193 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs") pod "openstack-operator-controller-manager-585789bb75-mft5q" (UID: "f3785053-5fa1-43b6-86f7-0182a1a49946") : secret "webhook-server-cert" not found
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.294077 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" event={"ID":"18418f15-9ec8-48df-a761-118f45058d06","Type":"ContainerStarted","Data":"e5fb1dbba1781cb98330a907ca753f10f9673d0c987a4a2f21643adc9eb5db66"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.296444 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" event={"ID":"13981b57-58d8-42f8-a833-f9760f06df20","Type":"ContainerStarted","Data":"b247db4e8031f3af7062678bef98714181b58db03be04c8787f91462bb82d326"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.299615 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" event={"ID":"15120e64-d800-43d8-b8c3-673e5854baef","Type":"ContainerStarted","Data":"89220eb819f323ab87666074668618a5ca9c30e6cdf7603e59d3044fca61509c"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.322420 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerStarted","Data":"e095ede24710d5bab5eb142571d8487e31e7842970bd90c52ab7d06c71197eb5"}
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.323017 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" podUID="15120e64-d800-43d8-b8c3-673e5854baef"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.325818 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" event={"ID":"4c649e41-10e8-4eee-bfc0-bf1a9409e421","Type":"ContainerStarted","Data":"ed8280ce38d59a8da15d2c5a7f3c4025cc1e597ba4de5128486611e9b26592c0"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.330556 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9"]
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.338157 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" event={"ID":"1e943cab-36af-421d-b7a4-24010912da99","Type":"ContainerStarted","Data":"3d34754896cff71189e9d12bb7c0f16fbef26d8feb016215424aa63bb23bd8e9"}
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.339983 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" podUID="1e943cab-36af-421d-b7a4-24010912da99"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.342406 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" event={"ID":"bb01ec67-804d-4800-9ab4-e607563017b2","Type":"ContainerStarted","Data":"349ecff0c106723132484e462bc577417be23457ca2a147f86a7d52fb34d8445"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.344077 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" event={"ID":"ab19d0cd-1e29-41af-892c-8f25f12b7f1c","Type":"ContainerStarted","Data":"a0f4dfb1fda3fd057b916e777ed6d7e6bef01fb276dac1030a20aefa094f8f8b"}
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.345175 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.345292 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" event={"ID":"10dee73c-a6d8-429d-b5c0-9226eec6d1f3","Type":"ContainerStarted","Data":"433d1adfd58eedee164985ab86686336f0ca67544ad99d2d51766238f7d1cb26"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.347803 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" event={"ID":"b8169d76-3e78-4510-aa23-e8d733d495a2","Type":"ContainerStarted","Data":"8f23aa66c46d979414c7e943050cc72c1dcd5b563c606188c9d76164fa8af49b"}
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.349022 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" podUID="b8169d76-3e78-4510-aa23-e8d733d495a2"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.349380 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerStarted","Data":"a62d6442f048b1610f8f0f2aba9e6b7e23b9375125499e3b1d9ae27ed665cede"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.350430 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" event={"ID":"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf","Type":"ContainerStarted","Data":"d0cc1a366d0a77ad0f9341d30f1bbda76e18d5576decbc1d13c610d6a785caa1"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.351849 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" event={"ID":"d6f00506-8ef7-46ec-9492-01e0005f90d3","Type":"ContainerStarted","Data":"b89d759a767c4f29f774d166d25c111e8a9caa9c2702a349fd30ad2e9479fd2d"}
Nov 25 17:01:05 crc kubenswrapper[4812]: E1125 17:01:05.351882 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" podUID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.366181 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xm2ss" event={"ID":"2867d1ef-c470-499f-993e-7c18782611b6","Type":"ContainerDied","Data":"7094ab3b1bd7497d04963a9e068f0b20a7dab44c27a42afae535140d6ed366df"}
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.366755 4812 scope.go:117] "RemoveContainer" containerID="a6b1b4eacb5ecd6983d576a32204c989fe4089c96bd54dbc7996c58a01ee3bd1"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.366939 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xm2ss"
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.433040 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xm2ss"]
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.442885 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xm2ss"]
Nov 25 17:01:05 crc kubenswrapper[4812]: I1125 17:01:05.866112 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2867d1ef-c470-499f-993e-7c18782611b6" path="/var/lib/kubelet/pods/2867d1ef-c470-499f-993e-7c18782611b6/volumes"
Nov 25 17:01:06 crc kubenswrapper[4812]: E1125 17:01:06.374779 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" podUID="b8169d76-3e78-4510-aa23-e8d733d495a2"
Nov 25 17:01:06 crc kubenswrapper[4812]: E1125 17:01:06.375861 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" podUID="15120e64-d800-43d8-b8c3-673e5854baef"
Nov 25 17:01:06 crc kubenswrapper[4812]: E1125 17:01:06.379122 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2"
Nov 25 17:01:06 crc kubenswrapper[4812]: E1125 17:01:06.379185 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" podUID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf"
Nov 25 17:01:06 crc kubenswrapper[4812]: E1125 17:01:06.385848 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" podUID="1e943cab-36af-421d-b7a4-24010912da99"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.062299 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.064001 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.069192 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-metrics-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.069306 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f3785053-5fa1-43b6-86f7-0182a1a49946-webhook-certs\") pod \"openstack-operator-controller-manager-585789bb75-mft5q\" (UID: \"f3785053-5fa1-43b6-86f7-0182a1a49946\") " pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.081185 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4bq5l"]
Nov 25 17:01:07 crc kubenswrapper[4812]: E1125 17:01:07.081630 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="registry-server"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.081647 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="registry-server"
Nov 25 17:01:07 crc kubenswrapper[4812]: E1125 17:01:07.081670 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="extract-utilities"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.081679 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="extract-utilities"
Nov 25 17:01:07 crc kubenswrapper[4812]: E1125 17:01:07.081694 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="extract-content"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.081703 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="extract-content"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.081875 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2867d1ef-c470-499f-993e-7c18782611b6" containerName="registry-server"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.083235 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.095413 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4bq5l"]
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.214697 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-5k96m"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.223196 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.267871 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9v8z\" (UniqueName: \"kubernetes.io/projected/eecbbe13-dde4-4f80-9472-5050409a5a43-kube-api-access-n9v8z\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.267998 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-catalog-content\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.268034 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-utilities\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: W1125 17:01:07.346052 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54850143_f77e_4d59_bcc4_c5bd3bc85880.slice/crio-8c482cb6510d0518ee0aa87fd2bed8946db8da832d8640abe5e344f597ee0247 WatchSource:0}: Error finding container 8c482cb6510d0518ee0aa87fd2bed8946db8da832d8640abe5e344f597ee0247: Status 404 returned error can't find the container with id 8c482cb6510d0518ee0aa87fd2bed8946db8da832d8640abe5e344f597ee0247
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.369643 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9v8z\" (UniqueName: \"kubernetes.io/projected/eecbbe13-dde4-4f80-9472-5050409a5a43-kube-api-access-n9v8z\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.369755 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-catalog-content\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.369817 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-utilities\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.370422 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-catalog-content\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.370490 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-utilities\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.390677 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9v8z\" (UniqueName: \"kubernetes.io/projected/eecbbe13-dde4-4f80-9472-5050409a5a43-kube-api-access-n9v8z\") pod \"community-operators-4bq5l\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.392729 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" event={"ID":"54850143-f77e-4d59-bcc4-c5bd3bc85880","Type":"ContainerStarted","Data":"8c482cb6510d0518ee0aa87fd2bed8946db8da832d8640abe5e344f597ee0247"}
Nov 25 17:01:07 crc kubenswrapper[4812]: I1125 17:01:07.443792 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4bq5l"
Nov 25 17:01:12 crc kubenswrapper[4812]: I1125 17:01:12.795911 4812 scope.go:117] "RemoveContainer" containerID="c76ba04ff6eafd9b7d08a5d92bd2776430225f335c95870646c3fdff161a7419"
Nov 25 17:01:19 crc kubenswrapper[4812]: E1125 17:01:19.579436 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6"
Nov 25 17:01:19 crc kubenswrapper[4812]: E1125 17:01:19.580218 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6b8fl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7c57c8bbc4-vncgw_openstack-operators(d6f00506-8ef7-46ec-9492-01e0005f90d3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 17:01:21 crc kubenswrapper[4812]: E1125 17:01:21.733784 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f"
Nov 25 17:01:21 crc kubenswrapper[4812]: E1125 17:01:21.734318 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-swl26,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-vzgsw_openstack-operators(98133284-26db-4073-a43c-f9572476153c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 25 17:01:22 crc kubenswrapper[4812]: I1125 17:01:22.151427 4812 scope.go:117] "RemoveContainer" containerID="679ca533e42cd418ea851f7c08ebc4fe59adc4394cac550ce98544182de3e83e"
Nov 25 17:01:22 crc kubenswrapper[4812]: I1125 17:01:22.891462 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4bq5l"]
Nov 25 17:01:24 crc kubenswrapper[4812]: I1125 17:01:24.687908 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"]
Nov 25 17:01:25 crc kubenswrapper[4812]: I1125 17:01:25.538885 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerStarted","Data":"d82f704fa693291cf03272bcd3b48692d2d57c7173d83f3fdb0d946ea76b1f0a"}
Nov 25 17:01:25 crc kubenswrapper[4812]: I1125 17:01:25.540441 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerStarted","Data":"3114fd0fb85bc1c0d9890f4b33a6c01898d8ae805772bb83434dabd836391fa9"}
Nov 25 17:01:25 crc kubenswrapper[4812]: W1125 17:01:25.649707 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3785053_5fa1_43b6_86f7_0182a1a49946.slice/crio-ec8af900e30f6fc465b0410103e5d07942d9ecab06f6c60e7950dc254dee1ed6 WatchSource:0}: Error finding container ec8af900e30f6fc465b0410103e5d07942d9ecab06f6c60e7950dc254dee1ed6: Status 404 returned error can't find the container with id ec8af900e30f6fc465b0410103e5d07942d9ecab06f6c60e7950dc254dee1ed6
Nov 25 17:01:26 crc kubenswrapper[4812]: E1125 17:01:26.100817 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/systemd-tmpfiles-clean.service\": RecentStats: unable to find data in memory cache]"
Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.572450 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" event={"ID":"10dee73c-a6d8-429d-b5c0-9226eec6d1f3","Type":"ContainerStarted","Data":"c497239c53b0bb0aea6346e5b2828b86590ebf4ea5affca580af0e62937a59b3"}
pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" event={"ID":"16fc0b64-6599-4b8b-a0b7-b609dab9dd31","Type":"ContainerStarted","Data":"451f1386770552f65be8500268dacc98d675844b487465ac18bf1827f0b7b56a"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.588502 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" event={"ID":"2374c36a-5118-4a90-985c-1f80597d73af","Type":"ContainerStarted","Data":"3969c6027b71597f9d30beaa578c36aebed487523664a83323de6a99f3da11b9"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.592103 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" event={"ID":"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b","Type":"ContainerStarted","Data":"04762816d14047ca3f7d0ca0fbd8f44c397c3905f2c884ae8fdc2d500a3f0bbc"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.600116 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" event={"ID":"7f72311f-8622-43f6-b499-8b52318b0e2a","Type":"ContainerStarted","Data":"a57d9f197214c8f522b46a33a7f8df91bce62137fe97771b61ffdd37fdc75b36"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.601830 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" event={"ID":"f3785053-5fa1-43b6-86f7-0182a1a49946","Type":"ContainerStarted","Data":"ec8af900e30f6fc465b0410103e5d07942d9ecab06f6c60e7950dc254dee1ed6"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.614463 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" event={"ID":"13981b57-58d8-42f8-a833-f9760f06df20","Type":"ContainerStarted","Data":"860702da5ff8378a85ab73909c5f0c8a56a1dea49f0bd2eb619657a06847753c"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.621159 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerStarted","Data":"0f80e5511d8dc1a0e4560528ad2a6eade741bdab9fcd954359b8192391929042"} Nov 25 17:01:26 crc kubenswrapper[4812]: I1125 17:01:26.632236 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" event={"ID":"4c649e41-10e8-4eee-bfc0-bf1a9409e421","Type":"ContainerStarted","Data":"32cefc8394ca80e9b833f9439bdf862cf04c66049d85a0c77578bb82ad7e1848"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.655841 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" event={"ID":"1e943cab-36af-421d-b7a4-24010912da99","Type":"ContainerStarted","Data":"a4ce16e6f8c8d3c5f3a6110935c634da778d04f55faa4605827a16968d764769"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.670303 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" event={"ID":"bb01ec67-804d-4800-9ab4-e607563017b2","Type":"ContainerStarted","Data":"686a79e76b23ffc21086bfbd7749cdefa43a8a61ff9ee674b3064c65c9f2e5e4"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.673041 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" event={"ID":"48707b31-d8f9-4a7e-a8b9-2728249f0a49","Type":"ContainerStarted","Data":"3cf1764fe170a250162b704ab351d0ad53470fbf6aa1f28928a046ea271b57b0"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.678866 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" event={"ID":"d53b5c25-d66b-46c5-80a5-998eb9007598","Type":"ContainerStarted","Data":"f9154542015193ff79bc777ab7bb0962aa009ab2cb2f293a239ff779bf185158"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.684628 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" event={"ID":"39d8b8c1-7015-487a-9263-25531a65c48c","Type":"ContainerStarted","Data":"dc3206865b8b1fa593acbe457ead7ff70274aae9cbb488665ef47fce67e7ef95"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.686849 4812 generic.go:334] "Generic (PLEG): container finished" podID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerID="44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc" exitCode=0 Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.686902 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerDied","Data":"44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.689889 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" event={"ID":"54850143-f77e-4d59-bcc4-c5bd3bc85880","Type":"ContainerStarted","Data":"d3636554fd6483c45f349ea319aac606c837dc129a8c4b6677981fe3f3e094b4"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.692033 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" event={"ID":"ab19d0cd-1e29-41af-892c-8f25f12b7f1c","Type":"ContainerStarted","Data":"576daabf7b43c05aeff710aea1f70add4856e5cfdbf21cbe8cb0fa3e3b092d5b"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.693554 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" event={"ID":"f3785053-5fa1-43b6-86f7-0182a1a49946","Type":"ContainerStarted","Data":"fe08c810d96a8afe62aca260ff216aeb0bd8cbb3e9add8d23ff941fa3dc0512e"} Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.693738 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:27 crc kubenswrapper[4812]: I1125 17:01:27.730148 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" podStartSLOduration=24.73012822 podStartE2EDuration="24.73012822s" podCreationTimestamp="2025-11-25 17:01:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:01:27.728658444 +0000 UTC m=+862.568800549" watchObservedRunningTime="2025-11-25 17:01:27.73012822 +0000 UTC m=+862.570270315" Nov 25 17:01:28 crc kubenswrapper[4812]: E1125 17:01:28.682450 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" podUID="18418f15-9ec8-48df-a761-118f45058d06" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.708041 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" event={"ID":"15120e64-d800-43d8-b8c3-673e5854baef","Type":"ContainerStarted","Data":"c05d8eea76f30d3a79a3aa0ab6c143da1955790b3bf84115f33199956c307640"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.708087 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" event={"ID":"15120e64-d800-43d8-b8c3-673e5854baef","Type":"ContainerStarted","Data":"f494fb07ebdc8dc1cc239fd2d67ed84cf5d854dc59436114131a94e47d2a56ef"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.709199 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.711284 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" event={"ID":"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf","Type":"ContainerStarted","Data":"a36bab60c803b3f20aad316114a6690e56d66edd2b1bad28e3a95f718ad314bf"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.716214 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" event={"ID":"10dee73c-a6d8-429d-b5c0-9226eec6d1f3","Type":"ContainerStarted","Data":"8bb7fdd47f4b0bbd5a1a3b5147dc1d072527b1ba5db39881827c1caa54d0c281"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.716663 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.721451 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" event={"ID":"4c649e41-10e8-4eee-bfc0-bf1a9409e421","Type":"ContainerStarted","Data":"68354931fc3b0d696933745c122ee07e7ebbc9889afc85529c5992732064df81"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.721690 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.724697 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" event={"ID":"1e943cab-36af-421d-b7a4-24010912da99","Type":"ContainerStarted","Data":"c5c9b4e6985e202f8ab643cca1d55e4fae828c2b02aa7f52949c927121044551"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.725004 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.737236 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" podStartSLOduration=5.458391787 podStartE2EDuration="26.73721828s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.838147681 +0000 UTC m=+839.678289776" lastFinishedPulling="2025-11-25 17:01:26.116974174 +0000 
UTC m=+860.957116269" observedRunningTime="2025-11-25 17:01:28.733746051 +0000 UTC m=+863.573888146" watchObservedRunningTime="2025-11-25 17:01:28.73721828 +0000 UTC m=+863.577360375" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.739845 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" event={"ID":"18418f15-9ec8-48df-a761-118f45058d06","Type":"ContainerStarted","Data":"039cff8e155c3ca05e5085250c923f55d543de9f124ef8bff78011d0d845a2d6"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.748334 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.761758 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" podStartSLOduration=3.23234658 podStartE2EDuration="26.761731197s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.66028694 +0000 UTC m=+839.500429035" lastFinishedPulling="2025-11-25 17:01:28.189671557 +0000 UTC m=+863.029813652" observedRunningTime="2025-11-25 17:01:28.755141798 +0000 UTC m=+863.595283893" watchObservedRunningTime="2025-11-25 17:01:28.761731197 +0000 UTC m=+863.601873292" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.762304 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" event={"ID":"b8169d76-3e78-4510-aa23-e8d733d495a2","Type":"ContainerStarted","Data":"20afee5fa98cb3825815039bb3bc7125d635c39d86cc179bcbd07bf4fc145d43"} Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.789716 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" podStartSLOduration=3.230019147 podStartE2EDuration="26.789695312s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.657820263 +0000 UTC m=+839.497962358" lastFinishedPulling="2025-11-25 17:01:28.217496428 +0000 UTC m=+863.057638523" observedRunningTime="2025-11-25 17:01:28.778157316 +0000 UTC m=+863.618299411" watchObservedRunningTime="2025-11-25 17:01:28.789695312 +0000 UTC m=+863.629837417" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.811013 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" podStartSLOduration=3.360429996 podStartE2EDuration="26.810997506s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.814191204 +0000 UTC m=+839.654333299" lastFinishedPulling="2025-11-25 17:01:28.264758704 +0000 UTC m=+863.104900809" observedRunningTime="2025-11-25 17:01:28.799010236 +0000 UTC m=+863.639152331" watchObservedRunningTime="2025-11-25 17:01:28.810997506 +0000 UTC m=+863.651139601" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.831736 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" podStartSLOduration=4.55293509 podStartE2EDuration="25.831717322s" podCreationTimestamp="2025-11-25 17:01:03 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.838199482 +0000 UTC m=+839.678341577" lastFinishedPulling="2025-11-25 17:01:26.116981694 +0000 UTC m=+860.957123809" 
observedRunningTime="2025-11-25 17:01:28.813418913 +0000 UTC m=+863.653561008" watchObservedRunningTime="2025-11-25 17:01:28.831717322 +0000 UTC m=+863.671859417" Nov 25 17:01:28 crc kubenswrapper[4812]: I1125 17:01:28.860995 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" podStartSLOduration=5.988912268 podStartE2EDuration="26.860975458s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:07.350508909 +0000 UTC m=+842.190651004" lastFinishedPulling="2025-11-25 17:01:28.222572099 +0000 UTC m=+863.062714194" observedRunningTime="2025-11-25 17:01:28.860501833 +0000 UTC m=+863.700643928" watchObservedRunningTime="2025-11-25 17:01:28.860975458 +0000 UTC m=+863.701117553" Nov 25 17:01:28 crc kubenswrapper[4812]: E1125 17:01:28.901052 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c" Nov 25 17:01:29 crc kubenswrapper[4812]: E1125 17:01:29.165976 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" podUID="d6f00506-8ef7-46ec-9492-01e0005f90d3" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.770242 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerStarted","Data":"502c473b060cf356e4eb7085a67abdb6ad08f61924de906224326306b6e0affc"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.770557 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.773819 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" event={"ID":"ab19d0cd-1e29-41af-892c-8f25f12b7f1c","Type":"ContainerStarted","Data":"a8e3e222ec24b32b96a095524a922d1cae479ee8c2b9c991e83adc4c45f23d97"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.773992 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.777925 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" event={"ID":"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf","Type":"ContainerStarted","Data":"ce7900ec1a7c3d9b6b0a1d742827185bb6fb8872067f8532d894ea1f2c41de65"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.778068 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.779736 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" 
event={"ID":"d53b5c25-d66b-46c5-80a5-998eb9007598","Type":"ContainerStarted","Data":"97925fb11d30fef96185c655c9fcff6c010128c57be095c7f52bdd457e5da213"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.779835 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.781235 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" event={"ID":"39d8b8c1-7015-487a-9263-25531a65c48c","Type":"ContainerStarted","Data":"80a5773f30d7b1b5646e6f7887ed68a8c6ace717600772d096855fd00f68753e"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.781328 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.783140 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" event={"ID":"bb01ec67-804d-4800-9ab4-e607563017b2","Type":"ContainerStarted","Data":"77b059f130ac80b8925cfde92d1533421edb306e158966df32cc12f71ba1885f"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.783264 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.784629 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" event={"ID":"54850143-f77e-4d59-bcc4-c5bd3bc85880","Type":"ContainerStarted","Data":"dc6274c5271224972c0b48a4641522ea421b6b5539eeb456c8cf8ee8422bad1e"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.795114 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" event={"ID":"13981b57-58d8-42f8-a833-f9760f06df20","Type":"ContainerStarted","Data":"c063dca5867010eb421149e3be9252a42275acd649aed78fba55030e2045cde0"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.795252 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.795465 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" podStartSLOduration=4.212982712 podStartE2EDuration="27.79545142s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.728998805 +0000 UTC m=+839.569140900" lastFinishedPulling="2025-11-25 17:01:28.311467513 +0000 UTC m=+863.151609608" observedRunningTime="2025-11-25 17:01:29.79102337 +0000 UTC m=+864.631165485" watchObservedRunningTime="2025-11-25 17:01:29.79545142 +0000 UTC m=+864.635593515" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.797612 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" event={"ID":"48707b31-d8f9-4a7e-a8b9-2728249f0a49","Type":"ContainerStarted","Data":"93f2120c71a9a9e62a10dd43acc27e39c6a806edeb54faf9f4dada171ca9446e"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.797693 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.799357 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerStarted","Data":"6a24c8bb4bddf9de23eeccfe8303f03260b883068a7d7ceb6bd2370fe7cf0f9f"} Nov 25 17:01:29 crc kubenswrapper[4812]: E1125 17:01:29.802321 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.807298 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" event={"ID":"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b","Type":"ContainerStarted","Data":"bfbd6d9595b0e50e817a48a3eda0c004b7a61876a161ebb4ea022fb2bd8c70da"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.808055 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.818173 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" event={"ID":"7f72311f-8622-43f6-b499-8b52318b0e2a","Type":"ContainerStarted","Data":"68bf256151a7f3a9cd63b5d40bea6273f7efdca32175f2d7dc3f38e7b472064c"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.818853 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.821871 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podStartSLOduration=4.112375214 podStartE2EDuration="27.821848405s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.836747783 +0000 UTC m=+839.676889878" lastFinishedPulling="2025-11-25 17:01:28.546220974 +0000 UTC m=+863.386363069" observedRunningTime="2025-11-25 17:01:29.81788262 +0000 UTC m=+864.658024725" watchObservedRunningTime="2025-11-25 17:01:29.821848405 +0000 UTC m=+864.661990500" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.822733 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" event={"ID":"d6f00506-8ef7-46ec-9492-01e0005f90d3","Type":"ContainerStarted","Data":"09d37fcc18d3aa788864198b33306cd8a21a8463f0ba42cd1a555ba05102dd4e"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.853323 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" podStartSLOduration=4.319026421 podStartE2EDuration="27.85328126s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.666410626 +0000 UTC m=+839.506552721" lastFinishedPulling="2025-11-25 17:01:28.200665465 +0000 UTC m=+863.040807560" observedRunningTime="2025-11-25 
17:01:29.841664883 +0000 UTC m=+864.681806998" watchObservedRunningTime="2025-11-25 17:01:29.85328126 +0000 UTC m=+864.693423355" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.860465 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerStarted","Data":"eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.865995 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" event={"ID":"16fc0b64-6599-4b8b-a0b7-b609dab9dd31","Type":"ContainerStarted","Data":"e58091b530a3cded602181dcde531de3f90ddf1a2b9df50c0380afd1a4a60e4a"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.866878 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.871837 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" podStartSLOduration=6.57106317 podStartE2EDuration="27.871816668s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.748478271 +0000 UTC m=+839.588620366" lastFinishedPulling="2025-11-25 17:01:26.049231769 +0000 UTC m=+860.889373864" observedRunningTime="2025-11-25 17:01:29.867253382 +0000 UTC m=+864.707395487" watchObservedRunningTime="2025-11-25 17:01:29.871816668 +0000 UTC m=+864.711958763" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.889506 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" event={"ID":"2374c36a-5118-4a90-985c-1f80597d73af","Type":"ContainerStarted","Data":"4535945b9282a68037fdfcc6356bba2035f24036eee3ef8a7e967f65a66c1639"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.889590 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.897956 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" podStartSLOduration=3.37672838 podStartE2EDuration="27.897930404s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.218463484 +0000 UTC m=+839.058605579" lastFinishedPulling="2025-11-25 17:01:28.739665518 +0000 UTC m=+863.579807603" observedRunningTime="2025-11-25 17:01:29.889356162 +0000 UTC m=+864.729498267" watchObservedRunningTime="2025-11-25 17:01:29.897930404 +0000 UTC m=+864.738072509" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.902828 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerStarted","Data":"de0c3725e52bf8a1441e6c2cfb5d6863ce8015fc10d34c268999033df3d48748"} Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.902879 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.922666 4812 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" podStartSLOduration=3.6120044140000003 podStartE2EDuration="27.922646816s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.212821672 +0000 UTC m=+839.052963767" lastFinishedPulling="2025-11-25 17:01:28.523464074 +0000 UTC m=+863.363606169" observedRunningTime="2025-11-25 17:01:29.922018416 +0000 UTC m=+864.762160521" watchObservedRunningTime="2025-11-25 17:01:29.922646816 +0000 UTC m=+864.762788921" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.949687 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" podStartSLOduration=3.485119298 podStartE2EDuration="27.949669281s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.203856789 +0000 UTC m=+839.043998884" lastFinishedPulling="2025-11-25 17:01:28.668406772 +0000 UTC m=+863.508548867" observedRunningTime="2025-11-25 17:01:29.948016299 +0000 UTC m=+864.788158404" watchObservedRunningTime="2025-11-25 17:01:29.949669281 +0000 UTC m=+864.789811386" Nov 25 17:01:29 crc kubenswrapper[4812]: I1125 17:01:29.979215 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" podStartSLOduration=3.384957182 podStartE2EDuration="27.979200606s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:03.737444099 +0000 UTC m=+838.577586194" lastFinishedPulling="2025-11-25 17:01:28.331687533 +0000 UTC m=+863.171829618" observedRunningTime="2025-11-25 17:01:29.978758173 +0000 UTC m=+864.818900288" watchObservedRunningTime="2025-11-25 17:01:29.979200606 +0000 UTC m=+864.819342701" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.053431 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" podStartSLOduration=3.230349265 podStartE2EDuration="28.053414926s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:03.530233836 +0000 UTC m=+838.370375921" lastFinishedPulling="2025-11-25 17:01:28.353299497 +0000 UTC m=+863.193441582" observedRunningTime="2025-11-25 17:01:30.050496433 +0000 UTC m=+864.890638528" watchObservedRunningTime="2025-11-25 17:01:30.053414926 +0000 UTC m=+864.893557021" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.084643 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" podStartSLOduration=3.691916711 podStartE2EDuration="28.084621144s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.207121638 +0000 UTC m=+839.047263733" lastFinishedPulling="2025-11-25 17:01:28.599826081 +0000 UTC m=+863.439968166" observedRunningTime="2025-11-25 17:01:30.080790303 +0000 UTC m=+864.920932408" watchObservedRunningTime="2025-11-25 17:01:30.084621144 +0000 UTC m=+864.924763249" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.169808 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" podStartSLOduration=4.159105839 podStartE2EDuration="28.169789199s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 
17:01:04.20718774 +0000 UTC m=+839.047329835" lastFinishedPulling="2025-11-25 17:01:28.2178711 +0000 UTC m=+863.058013195" observedRunningTime="2025-11-25 17:01:30.143168587 +0000 UTC m=+864.983310682" watchObservedRunningTime="2025-11-25 17:01:30.169789199 +0000 UTC m=+865.009931294" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.191425 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" podStartSLOduration=4.526635399 podStartE2EDuration="28.191409494s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.666925829 +0000 UTC m=+839.507067924" lastFinishedPulling="2025-11-25 17:01:28.331699924 +0000 UTC m=+863.171842019" observedRunningTime="2025-11-25 17:01:30.166215307 +0000 UTC m=+865.006357402" watchObservedRunningTime="2025-11-25 17:01:30.191409494 +0000 UTC m=+865.031551579" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.194561 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" podStartSLOduration=3.379857256 podStartE2EDuration="28.194522743s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:03.820329827 +0000 UTC m=+838.660471922" lastFinishedPulling="2025-11-25 17:01:28.634995314 +0000 UTC m=+863.475137409" observedRunningTime="2025-11-25 17:01:30.190054091 +0000 UTC m=+865.030196186" watchObservedRunningTime="2025-11-25 17:01:30.194522743 +0000 UTC m=+865.034664838" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.917048 4812 generic.go:334] "Generic (PLEG): container finished" podID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerID="eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f" exitCode=0 Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.917155 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerDied","Data":"eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f"} Nov 25 17:01:30 crc kubenswrapper[4812]: E1125 17:01:30.920961 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.921062 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.921138 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" Nov 25 17:01:30 crc kubenswrapper[4812]: I1125 17:01:30.924804 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:01:31 crc kubenswrapper[4812]: I1125 17:01:31.925053 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" 
event={"ID":"d6f00506-8ef7-46ec-9492-01e0005f90d3","Type":"ContainerStarted","Data":"a2441127312ad5277ba2486c2d0f087210176039960daaf41da269f95dbca100"} Nov 25 17:01:31 crc kubenswrapper[4812]: I1125 17:01:31.925629 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:01:31 crc kubenswrapper[4812]: I1125 17:01:31.927306 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" event={"ID":"18418f15-9ec8-48df-a761-118f45058d06","Type":"ContainerStarted","Data":"d00cf2235f4744c58203a99d5605089a7e152ebeaa45e0f543846cd12189ea5e"} Nov 25 17:01:31 crc kubenswrapper[4812]: I1125 17:01:31.927590 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:01:31 crc kubenswrapper[4812]: I1125 17:01:31.930293 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:01:31 crc kubenswrapper[4812]: I1125 17:01:31.948130 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" podStartSLOduration=3.452815941 podStartE2EDuration="29.948111464s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.704905975 +0000 UTC m=+839.545048060" lastFinishedPulling="2025-11-25 17:01:31.200201498 +0000 UTC m=+866.040343583" observedRunningTime="2025-11-25 17:01:31.942065453 +0000 UTC m=+866.782207568" watchObservedRunningTime="2025-11-25 17:01:31.948111464 +0000 UTC m=+866.788253559" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.741124 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.759517 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" podStartSLOduration=4.091792022 podStartE2EDuration="30.759496359s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.748786859 +0000 UTC m=+839.588928954" lastFinishedPulling="2025-11-25 17:01:31.416491186 +0000 UTC m=+866.256633291" observedRunningTime="2025-11-25 17:01:31.994305156 +0000 UTC m=+866.834447261" watchObservedRunningTime="2025-11-25 17:01:32.759496359 +0000 UTC m=+867.599638454" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.768211 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.805399 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.903240 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.941594 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" 
event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerStarted","Data":"240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c"} Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.943847 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:01:32 crc kubenswrapper[4812]: I1125 17:01:32.965028 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4bq5l" podStartSLOduration=22.122337901 podStartE2EDuration="25.964943483s" podCreationTimestamp="2025-11-25 17:01:07 +0000 UTC" firstStartedPulling="2025-11-25 17:01:28.01791584 +0000 UTC m=+862.858057935" lastFinishedPulling="2025-11-25 17:01:31.860521422 +0000 UTC m=+866.700663517" observedRunningTime="2025-11-25 17:01:32.959272004 +0000 UTC m=+867.799414119" watchObservedRunningTime="2025-11-25 17:01:32.964943483 +0000 UTC m=+867.805085578" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.020161 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.073466 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.108932 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.136143 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.243605 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.316312 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.333672 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:01:33 crc kubenswrapper[4812]: I1125 17:01:33.527117 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:01:34 crc kubenswrapper[4812]: I1125 17:01:34.676074 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:01:37 crc kubenswrapper[4812]: I1125 17:01:37.228906 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:01:37 crc kubenswrapper[4812]: I1125 17:01:37.444057 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4bq5l" Nov 25 17:01:37 crc kubenswrapper[4812]: I1125 17:01:37.444420 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4bq5l" Nov 25 
17:01:37 crc kubenswrapper[4812]: I1125 17:01:37.481767 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4bq5l" Nov 25 17:01:38 crc kubenswrapper[4812]: I1125 17:01:38.020314 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4bq5l" Nov 25 17:01:38 crc kubenswrapper[4812]: I1125 17:01:38.066175 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4bq5l"] Nov 25 17:01:39 crc kubenswrapper[4812]: I1125 17:01:39.992244 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4bq5l" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="registry-server" containerID="cri-o://240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c" gracePeriod=2 Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.433832 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4bq5l" Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.511500 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-utilities\") pod \"eecbbe13-dde4-4f80-9472-5050409a5a43\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.511605 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9v8z\" (UniqueName: \"kubernetes.io/projected/eecbbe13-dde4-4f80-9472-5050409a5a43-kube-api-access-n9v8z\") pod \"eecbbe13-dde4-4f80-9472-5050409a5a43\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.511731 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-catalog-content\") pod \"eecbbe13-dde4-4f80-9472-5050409a5a43\" (UID: \"eecbbe13-dde4-4f80-9472-5050409a5a43\") " Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.512419 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-utilities" (OuterVolumeSpecName: "utilities") pod "eecbbe13-dde4-4f80-9472-5050409a5a43" (UID: "eecbbe13-dde4-4f80-9472-5050409a5a43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.517160 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eecbbe13-dde4-4f80-9472-5050409a5a43-kube-api-access-n9v8z" (OuterVolumeSpecName: "kube-api-access-n9v8z") pod "eecbbe13-dde4-4f80-9472-5050409a5a43" (UID: "eecbbe13-dde4-4f80-9472-5050409a5a43"). InnerVolumeSpecName "kube-api-access-n9v8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.563062 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eecbbe13-dde4-4f80-9472-5050409a5a43" (UID: "eecbbe13-dde4-4f80-9472-5050409a5a43"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.612750 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.612994 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eecbbe13-dde4-4f80-9472-5050409a5a43-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:40 crc kubenswrapper[4812]: I1125 17:01:40.613058 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9v8z\" (UniqueName: \"kubernetes.io/projected/eecbbe13-dde4-4f80-9472-5050409a5a43-kube-api-access-n9v8z\") on node \"crc\" DevicePath \"\"" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.002672 4812 generic.go:334] "Generic (PLEG): container finished" podID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerID="240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c" exitCode=0 Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.002730 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerDied","Data":"240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c"} Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.002770 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4bq5l" event={"ID":"eecbbe13-dde4-4f80-9472-5050409a5a43","Type":"ContainerDied","Data":"d82f704fa693291cf03272bcd3b48692d2d57c7173d83f3fdb0d946ea76b1f0a"} Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.002775 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4bq5l" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.002793 4812 scope.go:117] "RemoveContainer" containerID="240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.025108 4812 scope.go:117] "RemoveContainer" containerID="eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.040956 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4bq5l"] Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.048816 4812 scope.go:117] "RemoveContainer" containerID="44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.055902 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4bq5l"] Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.074460 4812 scope.go:117] "RemoveContainer" containerID="240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c" Nov 25 17:01:41 crc kubenswrapper[4812]: E1125 17:01:41.074992 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c\": container with ID starting with 240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c not found: ID does not exist" containerID="240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.075067 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c"} err="failed to get container status \"240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c\": rpc error: code = NotFound desc = could not find container \"240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c\": container with ID starting with 240dcac61066a6c69d8c891ecdb97bfdd3eb180e73a0470045769da13f82177c not found: ID does not exist" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.075106 4812 scope.go:117] "RemoveContainer" containerID="eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f" Nov 25 17:01:41 crc kubenswrapper[4812]: E1125 17:01:41.075472 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f\": container with ID starting with eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f not found: ID does not exist" containerID="eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.075503 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f"} err="failed to get container status \"eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f\": rpc error: code = NotFound desc = could not find container \"eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f\": container with ID starting with eab147999592fa60ea8e9184e2be43e80e2a2ec05a363ec8248bee4f4195c38f not found: ID does not exist" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.075617 4812 scope.go:117] "RemoveContainer" 
containerID="44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc" Nov 25 17:01:41 crc kubenswrapper[4812]: E1125 17:01:41.076261 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc\": container with ID starting with 44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc not found: ID does not exist" containerID="44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.076285 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc"} err="failed to get container status \"44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc\": rpc error: code = NotFound desc = could not find container \"44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc\": container with ID starting with 44767280394cc96a30cbb2bd45aceb769adb85098f3118f1dfb9c24896d46afc not found: ID does not exist" Nov 25 17:01:41 crc kubenswrapper[4812]: I1125 17:01:41.842263 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" path="/var/lib/kubelet/pods/eecbbe13-dde4-4f80-9472-5050409a5a43/volumes" Nov 25 17:01:43 crc kubenswrapper[4812]: I1125 17:01:43.054357 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:01:43 crc kubenswrapper[4812]: I1125 17:01:43.084813 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:02:02 crc kubenswrapper[4812]: I1125 17:02:02.153895 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerStarted","Data":"1589153ca57a370c41d950d1ef41a4a6af430c627f5364bdb0ba6a4ace1e65d2"} Nov 25 17:02:02 crc kubenswrapper[4812]: I1125 17:02:02.154594 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:02:02 crc kubenswrapper[4812]: I1125 17:02:02.169276 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podStartSLOduration=3.7937169490000002 podStartE2EDuration="1m0.169260966s" podCreationTimestamp="2025-11-25 17:01:02 +0000 UTC" firstStartedPulling="2025-11-25 17:01:04.705302455 +0000 UTC m=+839.545444550" lastFinishedPulling="2025-11-25 17:02:01.080846472 +0000 UTC m=+895.920988567" observedRunningTime="2025-11-25 17:02:02.168572647 +0000 UTC m=+897.008714752" watchObservedRunningTime="2025-11-25 17:02:02.169260966 +0000 UTC m=+897.009403061" Nov 25 17:02:13 crc kubenswrapper[4812]: I1125 17:02:13.550133 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.768774 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7mnvx"] Nov 25 17:02:37 crc kubenswrapper[4812]: E1125 17:02:37.769785 4812 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="extract-content" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.769804 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="extract-content" Nov 25 17:02:37 crc kubenswrapper[4812]: E1125 17:02:37.769851 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="registry-server" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.769858 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="registry-server" Nov 25 17:02:37 crc kubenswrapper[4812]: E1125 17:02:37.769885 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="extract-utilities" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.769894 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="extract-utilities" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.770065 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="eecbbe13-dde4-4f80-9472-5050409a5a43" containerName="registry-server" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.772193 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.777302 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.777421 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-cz9nb" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.777516 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.777731 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.779704 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7mnvx"] Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.839438 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rqjdj"] Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.840889 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.842496 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rqjdj"] Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.845166 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.897954 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ef1e09-0070-4327-a021-6da19cf57116-config\") pod \"dnsmasq-dns-675f4bcbfc-7mnvx\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.898134 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgn8p\" (UniqueName: \"kubernetes.io/projected/f7ef1e09-0070-4327-a021-6da19cf57116-kube-api-access-kgn8p\") pod \"dnsmasq-dns-675f4bcbfc-7mnvx\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.999718 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ef1e09-0070-4327-a021-6da19cf57116-config\") pod \"dnsmasq-dns-675f4bcbfc-7mnvx\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.999776 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.999812 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgn8p\" (UniqueName: \"kubernetes.io/projected/f7ef1e09-0070-4327-a021-6da19cf57116-kube-api-access-kgn8p\") pod \"dnsmasq-dns-675f4bcbfc-7mnvx\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.999838 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-config\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:37 crc kubenswrapper[4812]: I1125 17:02:37.999866 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h57r\" (UniqueName: \"kubernetes.io/projected/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-kube-api-access-9h57r\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.000674 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ef1e09-0070-4327-a021-6da19cf57116-config\") pod \"dnsmasq-dns-675f4bcbfc-7mnvx\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 
17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.017696 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgn8p\" (UniqueName: \"kubernetes.io/projected/f7ef1e09-0070-4327-a021-6da19cf57116-kube-api-access-kgn8p\") pod \"dnsmasq-dns-675f4bcbfc-7mnvx\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.093385 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.100902 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.100947 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-config\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.100973 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h57r\" (UniqueName: \"kubernetes.io/projected/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-kube-api-access-9h57r\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.101775 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.101914 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-config\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.128478 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h57r\" (UniqueName: \"kubernetes.io/projected/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-kube-api-access-9h57r\") pod \"dnsmasq-dns-78dd6ddcc-rqjdj\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.153981 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.394707 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7mnvx"] Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.439587 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:02:38 crc kubenswrapper[4812]: I1125 17:02:38.496691 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rqjdj"] Nov 25 17:02:39 crc kubenswrapper[4812]: I1125 17:02:39.417902 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" event={"ID":"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e","Type":"ContainerStarted","Data":"affa7bcf3a400dc781a31b0c0e65026bf13d443b2fcdb2c61a1645371f64dac8"} Nov 25 17:02:39 crc kubenswrapper[4812]: I1125 17:02:39.419055 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" event={"ID":"f7ef1e09-0070-4327-a021-6da19cf57116","Type":"ContainerStarted","Data":"406d61fd81e061cd4a79315c66d39fc469c97aacdd884b7da12a989227e95c2f"} Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.770706 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7mnvx"] Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.791140 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-nt59k"] Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.793986 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.807100 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-nt59k"] Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.948845 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.948900 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-config\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:40 crc kubenswrapper[4812]: I1125 17:02:40.948921 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz56r\" (UniqueName: \"kubernetes.io/projected/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-kube-api-access-bz56r\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.051157 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-config\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.051203 4812 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bz56r\" (UniqueName: \"kubernetes.io/projected/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-kube-api-access-bz56r\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.051289 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.052071 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.052579 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-config\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.054484 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rqjdj"] Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.078728 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bz56r\" (UniqueName: \"kubernetes.io/projected/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-kube-api-access-bz56r\") pod \"dnsmasq-dns-5ccc8479f9-nt59k\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") " pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.098141 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-dbrmg"] Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.120244 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-dbrmg"] Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.120484 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.134721 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.254244 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jz2zs\" (UniqueName: \"kubernetes.io/projected/6c5e797d-5b72-471a-bb61-329bd662a9ad-kube-api-access-jz2zs\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.254325 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-config\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.254361 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.355679 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jz2zs\" (UniqueName: \"kubernetes.io/projected/6c5e797d-5b72-471a-bb61-329bd662a9ad-kube-api-access-jz2zs\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.356036 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-config\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.356085 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.356974 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-config\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.356975 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.398225 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jz2zs\" (UniqueName: \"kubernetes.io/projected/6c5e797d-5b72-471a-bb61-329bd662a9ad-kube-api-access-jz2zs\") pod \"dnsmasq-dns-57d769cc4f-dbrmg\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") " 
pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.449083 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.928910 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.930396 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.935813 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.935960 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.936065 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.934219 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-f7vvj" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.937101 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.937114 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.937148 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 17:02:41 crc kubenswrapper[4812]: I1125 17:02:41.942249 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066216 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c7cd9664-97af-4900-a89e-ee5a790506c4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066285 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066319 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l98hf\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-kube-api-access-l98hf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066400 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066421 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066606 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066786 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066819 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c7cd9664-97af-4900-a89e-ee5a790506c4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066847 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066917 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.066986 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.167945 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168044 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168078 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c7cd9664-97af-4900-a89e-ee5a790506c4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168098 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168118 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l98hf\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-kube-api-access-l98hf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168145 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168164 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168194 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168237 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168254 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c7cd9664-97af-4900-a89e-ee5a790506c4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.168276 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.169254 4812 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.169639 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.169732 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.169755 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.169792 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.170156 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.175290 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.177681 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c7cd9664-97af-4900-a89e-ee5a790506c4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.178962 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.186773 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c7cd9664-97af-4900-a89e-ee5a790506c4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.187224 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l98hf\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-kube-api-access-l98hf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.200059 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.211645 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.213795 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.216424 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.216482 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f8qkh" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.216438 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.216626 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.216799 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.216831 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.217006 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.218456 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.270506 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371312 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371448 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-server-conf\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371517 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/171759d9-0ee5-4a7c-9548-f41d11f0c112-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371587 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371649 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/171759d9-0ee5-4a7c-9548-f41d11f0c112-pod-info\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371754 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-config-data\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371830 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.371977 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.372074 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc 
kubenswrapper[4812]: I1125 17:02:42.372168 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h46q\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-kube-api-access-9h46q\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.372247 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473234 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473289 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-server-conf\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473310 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/171759d9-0ee5-4a7c-9548-f41d11f0c112-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473330 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473350 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/171759d9-0ee5-4a7c-9548-f41d11f0c112-pod-info\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473369 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-config-data\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473389 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473420 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-plugins\") 
pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473445 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473477 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h46q\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-kube-api-access-9h46q\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.473491 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.474585 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.474646 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.475178 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.475561 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-server-conf\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.479098 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.479459 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.479490 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-config-data\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.481888 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/171759d9-0ee5-4a7c-9548-f41d11f0c112-pod-info\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.483095 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.483620 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/171759d9-0ee5-4a7c-9548-f41d11f0c112-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.497205 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h46q\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-kube-api-access-9h46q\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.501193 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " pod="openstack/rabbitmq-server-0" Nov 25 17:02:42 crc kubenswrapper[4812]: I1125 17:02:42.567042 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.648220 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.650681 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.653671 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-6fm5m" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.653801 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.653897 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.653974 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.659935 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.664375 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.796977 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d25252bd-b678-4684-abe0-933dc4ac926e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797104 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-kolla-config\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797202 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d25252bd-b678-4684-abe0-933dc4ac926e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797239 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzvbd\" (UniqueName: \"kubernetes.io/projected/d25252bd-b678-4684-abe0-933dc4ac926e-kube-api-access-gzvbd\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797266 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-config-data-default\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797491 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797622 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d25252bd-b678-4684-abe0-933dc4ac926e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.797656 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899271 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899382 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d25252bd-b678-4684-abe0-933dc4ac926e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899426 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-kolla-config\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899471 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzvbd\" (UniqueName: \"kubernetes.io/projected/d25252bd-b678-4684-abe0-933dc4ac926e-kube-api-access-gzvbd\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d25252bd-b678-4684-abe0-933dc4ac926e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899747 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.900149 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d25252bd-b678-4684-abe0-933dc4ac926e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.899523 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-config-data-default\") pod 
\"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.900277 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.900452 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-kolla-config\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.901092 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-config-data-default\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.901969 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d25252bd-b678-4684-abe0-933dc4ac926e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.902037 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d25252bd-b678-4684-abe0-933dc4ac926e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.907346 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d25252bd-b678-4684-abe0-933dc4ac926e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.907877 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d25252bd-b678-4684-abe0-933dc4ac926e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.915593 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzvbd\" (UniqueName: \"kubernetes.io/projected/d25252bd-b678-4684-abe0-933dc4ac926e-kube-api-access-gzvbd\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.925744 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"d25252bd-b678-4684-abe0-933dc4ac926e\") " pod="openstack/openstack-galera-0" Nov 25 17:02:43 crc kubenswrapper[4812]: I1125 17:02:43.984802 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.045764 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.048384 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.050380 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.050716 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-zw2zw" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.050867 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.051636 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.051770 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.222239 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.222285 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.222310 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.222673 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.223094 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/578466f7-9fe7-4e31-9006-58216401d68e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.223207 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/578466f7-9fe7-4e31-9006-58216401d68e-config-data-generated\") pod 
\"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.223400 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578466f7-9fe7-4e31-9006-58216401d68e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.223446 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmdd4\" (UniqueName: \"kubernetes.io/projected/578466f7-9fe7-4e31-9006-58216401d68e-kube-api-access-gmdd4\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.326967 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/578466f7-9fe7-4e31-9006-58216401d68e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327022 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578466f7-9fe7-4e31-9006-58216401d68e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327044 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmdd4\" (UniqueName: \"kubernetes.io/projected/578466f7-9fe7-4e31-9006-58216401d68e-kube-api-access-gmdd4\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327065 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327084 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327104 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327173 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: 
\"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327213 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/578466f7-9fe7-4e31-9006-58216401d68e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.327652 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.328338 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/578466f7-9fe7-4e31-9006-58216401d68e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.328510 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.328849 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.329897 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/578466f7-9fe7-4e31-9006-58216401d68e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.338165 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578466f7-9fe7-4e31-9006-58216401d68e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.346353 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/578466f7-9fe7-4e31-9006-58216401d68e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.353423 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmdd4\" (UniqueName: \"kubernetes.io/projected/578466f7-9fe7-4e31-9006-58216401d68e-kube-api-access-gmdd4\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0" Nov 25 17:02:45 crc 
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.370008 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"578466f7-9fe7-4e31-9006-58216401d68e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.519283 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.521065 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.522832 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.523467 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.523986 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-8n4tq"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.597452 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.634836 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fd4bc505-d422-4aea-9914-7e243fe19a26-config-data\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.634924 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd4bc505-d422-4aea-9914-7e243fe19a26-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.634995 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd4bc505-d422-4aea-9914-7e243fe19a26-kolla-config\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.635047 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd4bc505-d422-4aea-9914-7e243fe19a26-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.635091 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lskh4\" (UniqueName: \"kubernetes.io/projected/fd4bc505-d422-4aea-9914-7e243fe19a26-kube-api-access-lskh4\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.666915 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.736749 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fd4bc505-d422-4aea-9914-7e243fe19a26-config-data\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.736808 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd4bc505-d422-4aea-9914-7e243fe19a26-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.736859 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd4bc505-d422-4aea-9914-7e243fe19a26-kolla-config\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.736896 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd4bc505-d422-4aea-9914-7e243fe19a26-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.736934 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lskh4\" (UniqueName: \"kubernetes.io/projected/fd4bc505-d422-4aea-9914-7e243fe19a26-kube-api-access-lskh4\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.738218 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fd4bc505-d422-4aea-9914-7e243fe19a26-config-data\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.739427 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fd4bc505-d422-4aea-9914-7e243fe19a26-kolla-config\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.742432 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd4bc505-d422-4aea-9914-7e243fe19a26-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.743202 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd4bc505-d422-4aea-9914-7e243fe19a26-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.770178 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lskh4\" (UniqueName: \"kubernetes.io/projected/fd4bc505-d422-4aea-9914-7e243fe19a26-kube-api-access-lskh4\") pod \"memcached-0\" (UID: \"fd4bc505-d422-4aea-9914-7e243fe19a26\") " pod="openstack/memcached-0"
Nov 25 17:02:45 crc kubenswrapper[4812]: I1125 17:02:45.846224 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.209732 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.212084 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.213880 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-xmqng"
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.219776 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.368468 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58fb9\" (UniqueName: \"kubernetes.io/projected/bf71da98-d4be-4c2f-a900-118282c5fa5f-kube-api-access-58fb9\") pod \"kube-state-metrics-0\" (UID: \"bf71da98-d4be-4c2f-a900-118282c5fa5f\") " pod="openstack/kube-state-metrics-0"
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.469479 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58fb9\" (UniqueName: \"kubernetes.io/projected/bf71da98-d4be-4c2f-a900-118282c5fa5f-kube-api-access-58fb9\") pod \"kube-state-metrics-0\" (UID: \"bf71da98-d4be-4c2f-a900-118282c5fa5f\") " pod="openstack/kube-state-metrics-0"
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.490255 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58fb9\" (UniqueName: \"kubernetes.io/projected/bf71da98-d4be-4c2f-a900-118282c5fa5f-kube-api-access-58fb9\") pod \"kube-state-metrics-0\" (UID: \"bf71da98-d4be-4c2f-a900-118282c5fa5f\") " pod="openstack/kube-state-metrics-0"
Nov 25 17:02:47 crc kubenswrapper[4812]: I1125 17:02:47.533668 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.155063 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f5lv8"]
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.156554 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f5lv8"
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.158834 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.159202 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.159309 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8gzzr"
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.173088 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f5lv8"]
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.213085 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-qntcq"]
Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.215509 4812 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.230331 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-qntcq"] Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232280 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl5g8\" (UniqueName: \"kubernetes.io/projected/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-kube-api-access-bl5g8\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232326 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-run-ovn\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232344 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-scripts\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232367 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-run\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232388 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-log-ovn\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232403 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-combined-ca-bundle\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.232424 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-ovn-controller-tls-certs\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333332 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cls66\" (UniqueName: \"kubernetes.io/projected/86cceb9c-b87d-429f-8a66-8c6765fc4939-kube-api-access-cls66\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333398 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-run-ovn\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333420 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-scripts\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333545 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-run\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333614 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-log-ovn\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333632 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-combined-ca-bundle\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333659 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-lib\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333701 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-ovn-controller-tls-certs\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333724 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86cceb9c-b87d-429f-8a66-8c6765fc4939-scripts\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333816 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-run\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333870 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-etc-ovs\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " 
pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.333917 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-log\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.334014 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl5g8\" (UniqueName: \"kubernetes.io/projected/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-kube-api-access-bl5g8\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.334089 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-run-ovn\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.334171 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-run\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.334201 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-var-log-ovn\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.335419 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-scripts\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.340521 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-combined-ca-bundle\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.343263 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-ovn-controller-tls-certs\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.351412 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl5g8\" (UniqueName: \"kubernetes.io/projected/d43f9e93-ab7b-4a2f-9446-21ab9721b39f-kube-api-access-bl5g8\") pod \"ovn-controller-f5lv8\" (UID: \"d43f9e93-ab7b-4a2f-9446-21ab9721b39f\") " pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.435973 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cls66\" (UniqueName: 
\"kubernetes.io/projected/86cceb9c-b87d-429f-8a66-8c6765fc4939-kube-api-access-cls66\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436055 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-lib\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436091 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86cceb9c-b87d-429f-8a66-8c6765fc4939-scripts\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436140 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-run\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436171 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-etc-ovs\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436205 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-log\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436556 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-log\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436605 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-lib\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436651 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-var-run\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.436961 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/86cceb9c-b87d-429f-8a66-8c6765fc4939-etc-ovs\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.439183 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86cceb9c-b87d-429f-8a66-8c6765fc4939-scripts\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.453102 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cls66\" (UniqueName: \"kubernetes.io/projected/86cceb9c-b87d-429f-8a66-8c6765fc4939-kube-api-access-cls66\") pod \"ovn-controller-ovs-qntcq\" (UID: \"86cceb9c-b87d-429f-8a66-8c6765fc4939\") " pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.477049 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f5lv8" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.527779 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.592906 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.594391 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.596856 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.597008 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-hgsrb" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.598064 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.598412 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.600005 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.607659 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740093 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740141 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740199 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 
17:02:51.740237 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xfqt\" (UniqueName: \"kubernetes.io/projected/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-kube-api-access-4xfqt\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740282 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740323 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740489 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-config\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.740555 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.841784 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xfqt\" (UniqueName: \"kubernetes.io/projected/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-kube-api-access-4xfqt\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.841852 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.841884 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.841907 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-config\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.841922 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.841983 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.842499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.842591 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.842601 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.843001 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.843150 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.843263 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-config\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.845872 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.846368 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.846408 4812 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.861751 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.867560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xfqt\" (UniqueName: \"kubernetes.io/projected/4e00d2f6-fe57-4a67-88e7-e1f101ba51da-kube-api-access-4xfqt\") pod \"ovsdbserver-nb-0\" (UID: \"4e00d2f6-fe57-4a67-88e7-e1f101ba51da\") " pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:51 crc kubenswrapper[4812]: I1125 17:02:51.911851 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.771433 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.773035 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.778199 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.778273 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-fkdrg" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.783201 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.783213 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.789348 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858183 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txvrj\" (UniqueName: \"kubernetes.io/projected/79c51471-1ba0-4dc3-927c-1057283fe10b-kube-api-access-txvrj\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858233 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/79c51471-1ba0-4dc3-927c-1057283fe10b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858282 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79c51471-1ba0-4dc3-927c-1057283fe10b-config\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858420 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858567 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858622 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858657 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.858778 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/79c51471-1ba0-4dc3-927c-1057283fe10b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.871503 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.960098 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.960432 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.960499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/79c51471-1ba0-4dc3-927c-1057283fe10b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.960633 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txvrj\" (UniqueName: \"kubernetes.io/projected/79c51471-1ba0-4dc3-927c-1057283fe10b-kube-api-access-txvrj\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.960710 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/79c51471-1ba0-4dc3-927c-1057283fe10b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.961754 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79c51471-1ba0-4dc3-927c-1057283fe10b-config\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.961810 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.961952 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.962322 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/79c51471-1ba0-4dc3-927c-1057283fe10b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.962367 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.962837 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79c51471-1ba0-4dc3-927c-1057283fe10b-config\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.963177 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/79c51471-1ba0-4dc3-927c-1057283fe10b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.969297 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.970059 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: 
I1125 17:02:52.972303 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/79c51471-1ba0-4dc3-927c-1057283fe10b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.982312 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txvrj\" (UniqueName: \"kubernetes.io/projected/79c51471-1ba0-4dc3-927c-1057283fe10b-kube-api-access-txvrj\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:52 crc kubenswrapper[4812]: I1125 17:02:52.988819 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"79c51471-1ba0-4dc3-927c-1057283fe10b\") " pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:53 crc kubenswrapper[4812]: I1125 17:02:53.089923 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 17:02:53 crc kubenswrapper[4812]: E1125 17:02:53.276589 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 17:02:53 crc kubenswrapper[4812]: E1125 17:02:53.276788 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9h57r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-rqjdj_openstack(35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 17:02:53 crc kubenswrapper[4812]: E1125 17:02:53.277980 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" podUID="35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e" Nov 25 17:02:53 crc kubenswrapper[4812]: W1125 17:02:53.283470 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7cd9664_97af_4900_a89e_ee5a790506c4.slice/crio-a2a522f0cef12664bedd566719bf84b119f7ce6178e9b8a10bc66be13cd846f6 WatchSource:0}: Error finding container a2a522f0cef12664bedd566719bf84b119f7ce6178e9b8a10bc66be13cd846f6: Status 404 returned error can't find the container with id a2a522f0cef12664bedd566719bf84b119f7ce6178e9b8a10bc66be13cd846f6 Nov 25 17:02:53 crc kubenswrapper[4812]: E1125 17:02:53.323141 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 17:02:53 crc kubenswrapper[4812]: E1125 17:02:53.323308 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv 
--bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kgn8p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-7mnvx_openstack(f7ef1e09-0070-4327-a021-6da19cf57116): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 17:02:53 crc kubenswrapper[4812]: E1125 17:02:53.324712 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" podUID="f7ef1e09-0070-4327-a021-6da19cf57116" Nov 25 17:02:53 crc kubenswrapper[4812]: I1125 17:02:53.519998 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c7cd9664-97af-4900-a89e-ee5a790506c4","Type":"ContainerStarted","Data":"a2a522f0cef12664bedd566719bf84b119f7ce6178e9b8a10bc66be13cd846f6"} Nov 25 17:02:53 crc kubenswrapper[4812]: I1125 17:02:53.875587 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-dbrmg"] Nov 25 17:02:53 crc kubenswrapper[4812]: W1125 17:02:53.883172 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c5e797d_5b72_471a_bb61_329bd662a9ad.slice/crio-021edd3c53d3822618b86bb48bfdc16256d74a26a50ee7389e4c3f16d82bcbf5 WatchSource:0}: Error finding container 021edd3c53d3822618b86bb48bfdc16256d74a26a50ee7389e4c3f16d82bcbf5: Status 404 returned error can't find the container with id 021edd3c53d3822618b86bb48bfdc16256d74a26a50ee7389e4c3f16d82bcbf5 Nov 25 17:02:53 crc kubenswrapper[4812]: I1125 17:02:53.905288 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 17:02:53 crc kubenswrapper[4812]: W1125 17:02:53.909389 4812 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod171759d9_0ee5_4a7c_9548_f41d11f0c112.slice/crio-6e7cb869af185332b0da0f68f1ddd2afb73a2515d7599f41c070e51ddef00fcb WatchSource:0}: Error finding container 6e7cb869af185332b0da0f68f1ddd2afb73a2515d7599f41c070e51ddef00fcb: Status 404 returned error can't find the container with id 6e7cb869af185332b0da0f68f1ddd2afb73a2515d7599f41c070e51ddef00fcb
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.154167 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx"
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.159987 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj"
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.239225 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.244237 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.245140 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd4bc505_d422_4aea_9914_7e243fe19a26.slice/crio-cb592bb7c625ca6c07d83e1fbbf99bf138ec88705e08213d453cba99ce44efe7 WatchSource:0}: Error finding container cb592bb7c625ca6c07d83e1fbbf99bf138ec88705e08213d453cba99ce44efe7: Status 404 returned error can't find the container with id cb592bb7c625ca6c07d83e1fbbf99bf138ec88705e08213d453cba99ce44efe7
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.251247 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd25252bd_b678_4684_abe0_933dc4ac926e.slice/crio-6a91ddea1b391be47c2765097cc23cad62a4a325375310dc3a7fcd24cd65b127 WatchSource:0}: Error finding container 6a91ddea1b391be47c2765097cc23cad62a4a325375310dc3a7fcd24cd65b127: Status 404 returned error can't find the container with id 6a91ddea1b391be47c2765097cc23cad62a4a325375310dc3a7fcd24cd65b127
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.258834 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-nt59k"]
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.263485 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd43f9e93_ab7b_4a2f_9446_21ab9721b39f.slice/crio-e34e7ad9c84b435edbaec12657614057408a3732637974ebe21e067323136425 WatchSource:0}: Error finding container e34e7ad9c84b435edbaec12657614057408a3732637974ebe21e067323136425: Status 404 returned error can't find the container with id e34e7ad9c84b435edbaec12657614057408a3732637974ebe21e067323136425
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.269593 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f5lv8"]
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.273609 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bd0bb6a_1a2f_4aac_87bc_d70993b16087.slice/crio-1f88e505f0c34e6c56fe8f1975c3d56dcad1acad2b3480e07739824e64f323cf WatchSource:0}: Error finding container 1f88e505f0c34e6c56fe8f1975c3d56dcad1acad2b3480e07739824e64f323cf: Status 404 returned error can't find the container with id 1f88e505f0c34e6c56fe8f1975c3d56dcad1acad2b3480e07739824e64f323cf
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.273674 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.278836 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod578466f7_9fe7_4e31_9006_58216401d68e.slice/crio-606ca9169ea215ff59534fe224a32bbec59cfc888e20dad11392562011a1823e WatchSource:0}: Error finding container 606ca9169ea215ff59534fe224a32bbec59cfc888e20dad11392562011a1823e: Status 404 returned error can't find the container with id 606ca9169ea215ff59534fe224a32bbec59cfc888e20dad11392562011a1823e
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.279265 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.283740 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgn8p\" (UniqueName: \"kubernetes.io/projected/f7ef1e09-0070-4327-a021-6da19cf57116-kube-api-access-kgn8p\") pod \"f7ef1e09-0070-4327-a021-6da19cf57116\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") "
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.283796 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9h57r\" (UniqueName: \"kubernetes.io/projected/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-kube-api-access-9h57r\") pod \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") "
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.283853 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-config\") pod \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") "
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.283911 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ef1e09-0070-4327-a021-6da19cf57116-config\") pod \"f7ef1e09-0070-4327-a021-6da19cf57116\" (UID: \"f7ef1e09-0070-4327-a021-6da19cf57116\") "
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.283949 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-dns-svc\") pod \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\" (UID: \"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e\") "
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.284640 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-config" (OuterVolumeSpecName: "config") pod "35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e" (UID: "35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.285060 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7ef1e09-0070-4327-a021-6da19cf57116-config" (OuterVolumeSpecName: "config") pod "f7ef1e09-0070-4327-a021-6da19cf57116" (UID: "f7ef1e09-0070-4327-a021-6da19cf57116"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.286143 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e" (UID: "35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.286388 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7ef1e09-0070-4327-a021-6da19cf57116-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.286410 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.292064 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-kube-api-access-9h57r" (OuterVolumeSpecName: "kube-api-access-9h57r") pod "35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e" (UID: "35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e"). InnerVolumeSpecName "kube-api-access-9h57r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.293876 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7ef1e09-0070-4327-a021-6da19cf57116-kube-api-access-kgn8p" (OuterVolumeSpecName: "kube-api-access-kgn8p") pod "f7ef1e09-0070-4327-a021-6da19cf57116" (UID: "f7ef1e09-0070-4327-a021-6da19cf57116"). InnerVolumeSpecName "kube-api-access-kgn8p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.314123 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf71da98_d4be_4c2f_a900_118282c5fa5f.slice/crio-fb18ccc0c8b795e8db94d4e1f8b03202c2df48cb10e14229e8cbf3ee5a5b91fb WatchSource:0}: Error finding container fb18ccc0c8b795e8db94d4e1f8b03202c2df48cb10e14229e8cbf3ee5a5b91fb: Status 404 returned error can't find the container with id fb18ccc0c8b795e8db94d4e1f8b03202c2df48cb10e14229e8cbf3ee5a5b91fb
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.342565 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 25 17:02:54 crc kubenswrapper[4812]: W1125 17:02:54.346032 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e00d2f6_fe57_4a67_88e7_e1f101ba51da.slice/crio-4a9f7e3d0d42909662ba2f43e040131357993368ff91d6f14f4f46d6104e7cfe WatchSource:0}: Error finding container 4a9f7e3d0d42909662ba2f43e040131357993368ff91d6f14f4f46d6104e7cfe: Status 404 returned error can't find the container with id 4a9f7e3d0d42909662ba2f43e040131357993368ff91d6f14f4f46d6104e7cfe
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.387586 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgn8p\" (UniqueName: \"kubernetes.io/projected/f7ef1e09-0070-4327-a021-6da19cf57116-kube-api-access-kgn8p\") on node \"crc\" DevicePath \"\""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.387620 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9h57r\" (UniqueName: \"kubernetes.io/projected/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-kube-api-access-9h57r\") on node \"crc\" DevicePath \"\""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.387630 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.530221 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" event={"ID":"6c5e797d-5b72-471a-bb61-329bd662a9ad","Type":"ContainerStarted","Data":"a95def92c669421e774e489d70351ec0f03da986b87d4fc55af787be5dc612c0"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.530659 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" event={"ID":"6c5e797d-5b72-471a-bb61-329bd662a9ad","Type":"ContainerStarted","Data":"021edd3c53d3822618b86bb48bfdc16256d74a26a50ee7389e4c3f16d82bcbf5"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.531910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"578466f7-9fe7-4e31-9006-58216401d68e","Type":"ContainerStarted","Data":"606ca9169ea215ff59534fe224a32bbec59cfc888e20dad11392562011a1823e"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.534283 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx"
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.534292 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7mnvx" event={"ID":"f7ef1e09-0070-4327-a021-6da19cf57116","Type":"ContainerDied","Data":"406d61fd81e061cd4a79315c66d39fc469c97aacdd884b7da12a989227e95c2f"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.537404 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fd4bc505-d422-4aea-9914-7e243fe19a26","Type":"ContainerStarted","Data":"cb592bb7c625ca6c07d83e1fbbf99bf138ec88705e08213d453cba99ce44efe7"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.538603 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" event={"ID":"2bd0bb6a-1a2f-4aac-87bc-d70993b16087","Type":"ContainerStarted","Data":"1f88e505f0c34e6c56fe8f1975c3d56dcad1acad2b3480e07739824e64f323cf"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.539799 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"171759d9-0ee5-4a7c-9548-f41d11f0c112","Type":"ContainerStarted","Data":"6e7cb869af185332b0da0f68f1ddd2afb73a2515d7599f41c070e51ddef00fcb"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.542068 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d25252bd-b678-4684-abe0-933dc4ac926e","Type":"ContainerStarted","Data":"6a91ddea1b391be47c2765097cc23cad62a4a325375310dc3a7fcd24cd65b127"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.548198 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8" event={"ID":"d43f9e93-ab7b-4a2f-9446-21ab9721b39f","Type":"ContainerStarted","Data":"e34e7ad9c84b435edbaec12657614057408a3732637974ebe21e067323136425"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.552160 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bf71da98-d4be-4c2f-a900-118282c5fa5f","Type":"ContainerStarted","Data":"fb18ccc0c8b795e8db94d4e1f8b03202c2df48cb10e14229e8cbf3ee5a5b91fb"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.553724 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4e00d2f6-fe57-4a67-88e7-e1f101ba51da","Type":"ContainerStarted","Data":"4a9f7e3d0d42909662ba2f43e040131357993368ff91d6f14f4f46d6104e7cfe"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.555130 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj" event={"ID":"35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e","Type":"ContainerDied","Data":"affa7bcf3a400dc781a31b0c0e65026bf13d443b2fcdb2c61a1645371f64dac8"}
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.555232 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rqjdj"
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.625655 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rqjdj"]
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.638415 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rqjdj"]
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.651215 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7mnvx"]
Nov 25 17:02:54 crc kubenswrapper[4812]: I1125 17:02:54.656087 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7mnvx"]
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.164090 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-qntcq"]
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.360150 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.567742 4812 generic.go:334] "Generic (PLEG): container finished" podID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerID="a95def92c669421e774e489d70351ec0f03da986b87d4fc55af787be5dc612c0" exitCode=0
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.567792 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" event={"ID":"6c5e797d-5b72-471a-bb61-329bd662a9ad","Type":"ContainerDied","Data":"a95def92c669421e774e489d70351ec0f03da986b87d4fc55af787be5dc612c0"}
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.570817 4812 generic.go:334] "Generic (PLEG): container finished" podID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerID="889c19ea5169978409ece33c4ba463dbfe5bcac4b24cc5822ad6e3aeb1986ab5" exitCode=0
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.570841 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" event={"ID":"2bd0bb6a-1a2f-4aac-87bc-d70993b16087","Type":"ContainerDied","Data":"889c19ea5169978409ece33c4ba463dbfe5bcac4b24cc5822ad6e3aeb1986ab5"}
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.843041 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e" path="/var/lib/kubelet/pods/35e8eb03-7bc2-48d1-b9d8-7ead29f1ec2e/volumes"
Nov 25 17:02:55 crc kubenswrapper[4812]: I1125 17:02:55.843501 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7ef1e09-0070-4327-a021-6da19cf57116" path="/var/lib/kubelet/pods/f7ef1e09-0070-4327-a021-6da19cf57116/volumes"
Nov 25 17:02:56 crc kubenswrapper[4812]: W1125 17:02:56.426053 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86cceb9c_b87d_429f_8a66_8c6765fc4939.slice/crio-e39e16a7eec1d82d9858bafaab0bc5af26a7b00b566cc1904a0fc389d2db6639 WatchSource:0}: Error finding container e39e16a7eec1d82d9858bafaab0bc5af26a7b00b566cc1904a0fc389d2db6639: Status 404 returned error can't find the container with id e39e16a7eec1d82d9858bafaab0bc5af26a7b00b566cc1904a0fc389d2db6639
Nov 25 17:02:56 crc kubenswrapper[4812]: W1125 17:02:56.428993 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79c51471_1ba0_4dc3_927c_1057283fe10b.slice/crio-c85a4f790bd1809541818b38318c5884378b48a6927dab66ba7708f2a580b48c WatchSource:0}: Error finding container c85a4f790bd1809541818b38318c5884378b48a6927dab66ba7708f2a580b48c: Status 404 returned error can't find the container with id c85a4f790bd1809541818b38318c5884378b48a6927dab66ba7708f2a580b48c
Nov 25 17:02:56 crc kubenswrapper[4812]: I1125 17:02:56.583923 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"79c51471-1ba0-4dc3-927c-1057283fe10b","Type":"ContainerStarted","Data":"c85a4f790bd1809541818b38318c5884378b48a6927dab66ba7708f2a580b48c"}
Nov 25 17:02:56 crc kubenswrapper[4812]: I1125 17:02:56.585626 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qntcq" event={"ID":"86cceb9c-b87d-429f-8a66-8c6765fc4939","Type":"ContainerStarted","Data":"e39e16a7eec1d82d9858bafaab0bc5af26a7b00b566cc1904a0fc389d2db6639"}
Nov 25 17:03:01 crc kubenswrapper[4812]: I1125 17:03:01.622695 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" event={"ID":"2bd0bb6a-1a2f-4aac-87bc-d70993b16087","Type":"ContainerStarted","Data":"2c3e4b724197dfdb90ac79b0ce79621a159728d7efb063e5815ff454c7ce59a2"}
Nov 25 17:03:01 crc kubenswrapper[4812]: I1125 17:03:01.623758 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k"
Nov 25 17:03:01 crc kubenswrapper[4812]: I1125 17:03:01.641481 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" podStartSLOduration=21.241971578 podStartE2EDuration="21.641464335s" podCreationTimestamp="2025-11-25 17:02:40 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.307859761 +0000 UTC m=+949.148001856" lastFinishedPulling="2025-11-25 17:02:54.707352518 +0000 UTC m=+949.547494613" observedRunningTime="2025-11-25 17:03:01.640446159 +0000 UTC m=+956.480588264" watchObservedRunningTime="2025-11-25 17:03:01.641464335 +0000 UTC m=+956.481606430"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.631721 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fd4bc505-d422-4aea-9914-7e243fe19a26","Type":"ContainerStarted","Data":"93431548b36e8485f9bd07e13b63ea933e2851ab7d56c9d6e9bc8ec952574085"}
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.632300 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.634637 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"79c51471-1ba0-4dc3-927c-1057283fe10b","Type":"ContainerStarted","Data":"7aae9c0d71dcc09bb0a82a06e9fd1c448bb69f1d730fab033989f6eaa2d3c987"}
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.635998 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4e00d2f6-fe57-4a67-88e7-e1f101ba51da","Type":"ContainerStarted","Data":"74cb79dbb844ee0ba46996c2b7202c158c087f08b6ae0c3f6d13d109f29f1e18"}
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.638819 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" event={"ID":"6c5e797d-5b72-471a-bb61-329bd662a9ad","Type":"ContainerStarted","Data":"c322f0f781e048f74377629c3131cf38e060528bbb191ccadf37f04dcc30ba7b"}
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.638928 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.641570 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d25252bd-b678-4684-abe0-933dc4ac926e","Type":"ContainerStarted","Data":"920731ed5a78b4e1556a773b193f3ff6ca602a1122cb165b73335f27abd17d94"}
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.643416 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8" event={"ID":"d43f9e93-ab7b-4a2f-9446-21ab9721b39f","Type":"ContainerStarted","Data":"483816b6f797da59073b6d308ef25815b764bf9689686ea76170cd78f5d05eda"}
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.643702 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-f5lv8"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.652235 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=10.694305323 podStartE2EDuration="17.652221438s" podCreationTimestamp="2025-11-25 17:02:45 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.254060543 +0000 UTC m=+949.094202648" lastFinishedPulling="2025-11-25 17:03:01.211976658 +0000 UTC m=+956.052118763" observedRunningTime="2025-11-25 17:03:02.649688851 +0000 UTC m=+957.489830946" watchObservedRunningTime="2025-11-25 17:03:02.652221438 +0000 UTC m=+957.492363533"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.682019 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-psj48"]
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.683032 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.687173 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.693451 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-psj48"]
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.727235 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-f5lv8" podStartSLOduration=4.719412315 podStartE2EDuration="11.727219083s" podCreationTimestamp="2025-11-25 17:02:51 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.308166599 +0000 UTC m=+949.148308694" lastFinishedPulling="2025-11-25 17:03:01.315973367 +0000 UTC m=+956.156115462" observedRunningTime="2025-11-25 17:03:02.726017121 +0000 UTC m=+957.566159216" watchObservedRunningTime="2025-11-25 17:03:02.727219083 +0000 UTC m=+957.567361178"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.751314 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" podStartSLOduration=21.315333494 podStartE2EDuration="21.751294336s" podCreationTimestamp="2025-11-25 17:02:41 +0000 UTC" firstStartedPulling="2025-11-25 17:02:53.885636397 +0000 UTC m=+948.725778492" lastFinishedPulling="2025-11-25 17:02:54.321597239 +0000 UTC m=+949.161739334" observedRunningTime="2025-11-25 17:03:02.744201467 +0000 UTC m=+957.584343552" watchObservedRunningTime="2025-11-25 17:03:02.751294336 +0000 UTC m=+957.591436431"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.836303 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-nt59k"]
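The pod_startup_latency_tracker lines above carry two durations: podStartE2EDuration measures pod creation to observed running, while podStartSLOduration excludes the image pull window bounded by firstStartedPulling/lastFinishedPulling. For memcached-0, 17.65s E2E minus 10.69s SLO leaves roughly the 7s that the two pull timestamps span. A sketch that recovers this split from the log (Python; the field names are taken from the entries above, the script itself is illustrative):

    import re, sys

    pat = re.compile(r'pod="(?P<pod>[^"]+)" '
                     r'podStartSLOduration=(?P<slo>[0-9.]+) '
                     r'podStartE2EDuration="(?P<e2e>[0-9.]+)s"')

    for line in sys.stdin:
        for m in pat.finditer(line):
            # E2E minus SLO approximates time spent pulling images
            pull = float(m["e2e"]) - float(m["slo"])
            print(f'{m["pod"]}: e2e={m["e2e"]}s slo={m["slo"]}s pull~{pull:.2f}s')

Pods whose images were already cached log firstStartedPulling as the zero time ("0001-01-01 00:00:00 +0000 UTC") and identical SLO and E2E durations, as the dnsmasq-dns-7fd796d7df-q2zpv and 86db49b7ff-6tr4l entries later in this section show.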
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.842919 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d6518e64-5e2f-4ad8-beba-0cf915d5b698-ovs-rundir\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.842989 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6518e64-5e2f-4ad8-beba-0cf915d5b698-config\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.843013 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d6518e64-5e2f-4ad8-beba-0cf915d5b698-ovn-rundir\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.843090 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6518e64-5e2f-4ad8-beba-0cf915d5b698-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.843131 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx6zm\" (UniqueName: \"kubernetes.io/projected/d6518e64-5e2f-4ad8-beba-0cf915d5b698-kube-api-access-fx6zm\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.843184 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6518e64-5e2f-4ad8-beba-0cf915d5b698-combined-ca-bundle\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.857545 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-q2zpv"]
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.858883 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.874236 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.926675 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-q2zpv"]
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.944893 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzrrx\" (UniqueName: \"kubernetes.io/projected/36d8c637-bb40-4f4c-9fdf-56bbface6e35-kube-api-access-vzrrx\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945163 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6518e64-5e2f-4ad8-beba-0cf915d5b698-combined-ca-bundle\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945288 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-config\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945379 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945453 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945581 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d6518e64-5e2f-4ad8-beba-0cf915d5b698-ovs-rundir\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945748 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6518e64-5e2f-4ad8-beba-0cf915d5b698-config\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945827 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d6518e64-5e2f-4ad8-beba-0cf915d5b698-ovn-rundir\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945904 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6518e64-5e2f-4ad8-beba-0cf915d5b698-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.945985 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx6zm\" (UniqueName: \"kubernetes.io/projected/d6518e64-5e2f-4ad8-beba-0cf915d5b698-kube-api-access-fx6zm\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.946232 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d6518e64-5e2f-4ad8-beba-0cf915d5b698-ovs-rundir\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.946407 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d6518e64-5e2f-4ad8-beba-0cf915d5b698-ovn-rundir\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.947055 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6518e64-5e2f-4ad8-beba-0cf915d5b698-config\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.952435 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6518e64-5e2f-4ad8-beba-0cf915d5b698-combined-ca-bundle\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.952978 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6518e64-5e2f-4ad8-beba-0cf915d5b698-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.975222 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx6zm\" (UniqueName: \"kubernetes.io/projected/d6518e64-5e2f-4ad8-beba-0cf915d5b698-kube-api-access-fx6zm\") pod \"ovn-controller-metrics-psj48\" (UID: \"d6518e64-5e2f-4ad8-beba-0cf915d5b698\") " pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:02 crc kubenswrapper[4812]: I1125 17:03:02.992841 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-dbrmg"]
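Lines like the block above trace the volume reconciler's forward path for a new pod: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded, once per volume (ovn-controller-metrics-psj48 walks all six of its volumes through the chain within ~130ms). The UnmountVolume started / TearDown succeeded / Volume detached entries earlier in this section are the same state machine in reverse for pods being deleted. A rough tracker for the forward path (Python sketch; assumes one journald entry per line as above, and the names it prints are just whatever the log contains):

    import re, sys

    PHASES = [("VerifyControllerAttachedVolume started", "attached"),
              ("MountVolume started", "mounting"),
              ("MountVolume.SetUp succeeded", "mounted")]
    # the volume name appears as \"name\" inside the quoted klog message,
    # the owning pod as a plain pod="ns/name" key at the end of the entry
    vol_re = re.compile(r'for volume \\"(?P<vol>[^\\]+)\\".*pod="(?P<pod>[^"]+)"')

    state = {}
    for line in sys.stdin:
        for needle, phase in PHASES:
            if needle in line:
                m = vol_re.search(line)
                if m:
                    state[(m["pod"], m["vol"])] = phase

    for (pod, vol), phase in sorted(state.items()):
        print(f"{pod} {vol}: {phase}")

A volume stuck at "attached" or "mounting" at the end of the log is the thing to chase; here every volume reaches "mounted".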
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.011377 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-psj48"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.022060 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-6tr4l"]
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.025593 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.031403 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.051656 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-config\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.051743 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.051767 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.051939 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzrrx\" (UniqueName: \"kubernetes.io/projected/36d8c637-bb40-4f4c-9fdf-56bbface6e35-kube-api-access-vzrrx\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.052377 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-6tr4l"]
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.053270 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.053364 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-config\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.053962 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.086669 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzrrx\" (UniqueName: \"kubernetes.io/projected/36d8c637-bb40-4f4c-9fdf-56bbface6e35-kube-api-access-vzrrx\") pod \"dnsmasq-dns-7fd796d7df-q2zpv\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.155153 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6hf4\" (UniqueName: \"kubernetes.io/projected/e0f9d008-45d4-470b-b3f5-b4713a8730dd-kube-api-access-n6hf4\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.155410 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.155497 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.155768 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-config\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.155894 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.174798 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.257545 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6hf4\" (UniqueName: \"kubernetes.io/projected/e0f9d008-45d4-470b-b3f5-b4713a8730dd-kube-api-access-n6hf4\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.258155 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.258176 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.258214 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-config\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.258248 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.260199 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.260996 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.261839 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-config\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.264824 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.276588 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6hf4\" (UniqueName: \"kubernetes.io/projected/e0f9d008-45d4-470b-b3f5-b4713a8730dd-kube-api-access-n6hf4\") pod \"dnsmasq-dns-86db49b7ff-6tr4l\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.326053 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.525508 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-psj48"]
Nov 25 17:03:03 crc kubenswrapper[4812]: W1125 17:03:03.532521 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6518e64_5e2f_4ad8_beba_0cf915d5b698.slice/crio-21dd635c624c706a451ab4df4ff2be5099e4386b9ca813e31e5ea783992c654e WatchSource:0}: Error finding container 21dd635c624c706a451ab4df4ff2be5099e4386b9ca813e31e5ea783992c654e: Status 404 returned error can't find the container with id 21dd635c624c706a451ab4df4ff2be5099e4386b9ca813e31e5ea783992c654e
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.653778 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bf71da98-d4be-4c2f-a900-118282c5fa5f","Type":"ContainerStarted","Data":"d4dd23b8b48fdb9581e3623ad5103b7fef3d71ddf8eeeab3214504b8b1ae57e9"}
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.654141 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.662200 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-psj48" event={"ID":"d6518e64-5e2f-4ad8-beba-0cf915d5b698","Type":"ContainerStarted","Data":"21dd635c624c706a451ab4df4ff2be5099e4386b9ca813e31e5ea783992c654e"}
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.665131 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c7cd9664-97af-4900-a89e-ee5a790506c4","Type":"ContainerStarted","Data":"4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb"}
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.669460 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"171759d9-0ee5-4a7c-9548-f41d11f0c112","Type":"ContainerStarted","Data":"b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d"}
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.673778 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qntcq" event={"ID":"86cceb9c-b87d-429f-8a66-8c6765fc4939","Type":"ContainerStarted","Data":"94d916c112fb78a1493b4bfc5c8c83e60d4947978156258036807f90641ab40b"}
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.673880 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=9.012356744 podStartE2EDuration="16.673860932s" podCreationTimestamp="2025-11-25 17:02:47 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.319355978 +0000 UTC m=+949.159498073" lastFinishedPulling="2025-11-25 17:03:01.980860166 +0000 UTC m=+956.821002261" observedRunningTime="2025-11-25 17:03:03.671374426 +0000 UTC m=+958.511516521" watchObservedRunningTime="2025-11-25 17:03:03.673860932 +0000 UTC m=+958.514003027"
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.680661 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"578466f7-9fe7-4e31-9006-58216401d68e","Type":"ContainerStarted","Data":"ff32ddb0c38d456f540dc680a84e6d64467eaab1721d66507c1a76dcfceff5f9"}
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.741838 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-q2zpv"]
Nov 25 17:03:03 crc kubenswrapper[4812]: W1125 17:03:03.805760 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36d8c637_bb40_4f4c_9fdf_56bbface6e35.slice/crio-4fafe168562b848b2c73cbe708be526fd2f5add22884d0e2c000b6b50346ea90 WatchSource:0}: Error finding container 4fafe168562b848b2c73cbe708be526fd2f5add22884d0e2c000b6b50346ea90: Status 404 returned error can't find the container with id 4fafe168562b848b2c73cbe708be526fd2f5add22884d0e2c000b6b50346ea90
Nov 25 17:03:03 crc kubenswrapper[4812]: I1125 17:03:03.851565 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-6tr4l"]
Nov 25 17:03:03 crc kubenswrapper[4812]: W1125 17:03:03.865779 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0f9d008_45d4_470b_b3f5_b4713a8730dd.slice/crio-79534fcbf259cbd42ad639bde879b4b0b258580ca92cd435a0052e59bbbb086a WatchSource:0}: Error finding container 79534fcbf259cbd42ad639bde879b4b0b258580ca92cd435a0052e59bbbb086a: Status 404 returned error can't find the container with id 79534fcbf259cbd42ad639bde879b4b0b258580ca92cd435a0052e59bbbb086a
Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.689236 4812 generic.go:334] "Generic (PLEG): container finished" podID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerID="47ec22ae7c7f93448d58f28d9b083c0f3c8c3a2755e908f310704609d0f97238" exitCode=0
Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.689446 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" event={"ID":"e0f9d008-45d4-470b-b3f5-b4713a8730dd","Type":"ContainerDied","Data":"47ec22ae7c7f93448d58f28d9b083c0f3c8c3a2755e908f310704609d0f97238"}
Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.689620 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" event={"ID":"e0f9d008-45d4-470b-b3f5-b4713a8730dd","Type":"ContainerStarted","Data":"79534fcbf259cbd42ad639bde879b4b0b258580ca92cd435a0052e59bbbb086a"}
Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.691484 4812 generic.go:334] "Generic (PLEG): container finished" podID="86cceb9c-b87d-429f-8a66-8c6765fc4939" containerID="94d916c112fb78a1493b4bfc5c8c83e60d4947978156258036807f90641ab40b" exitCode=0
Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.691519 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qntcq" event={"ID":"86cceb9c-b87d-429f-8a66-8c6765fc4939","Type":"ContainerDied","Data":"94d916c112fb78a1493b4bfc5c8c83e60d4947978156258036807f90641ab40b"}
Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.694397 4812 generic.go:334] "Generic (PLEG): container finished" podID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerID="250e3e2e812a28ee2f95d9cfb7a437c0358a3e903f5ec3e2f834bf41771a2a34" exitCode=0
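The "container finished ... exitCode=0" events that arrive seconds after each new dnsmasq pod's first ContainerStarted (47ec22ae... for 86db49b7ff-6tr4l, 250e3e2e... for 7fd796d7df-q2zpv) match the pattern of short-lived init containers completing normally before the long-running container starts at 17:03:06; the log does not name the container, so treat that as the likely reading rather than a certainty. A sketch that lays out each pod's PLEG event sequence so the Started / Died-exit-0 / Started shape is visible (Python, illustrative; assumes one journald entry per line):

    import re, sys
    from collections import defaultdict

    # pod="ns/name" event={"ID":"...","Type":"ContainerStarted","Data":"<id>"}
    evt = re.compile(r'pod="([^"]+)" event=.*?"Type":"(\w+)","Data":"([0-9a-f]{8})')

    timeline = defaultdict(list)
    for line in sys.stdin:
        for pod, typ, cid in evt.findall(line):
            timeline[pod].append(f"{typ}({cid})")

    for pod, events in sorted(timeline.items()):
        print(pod, "->", " ".join(events))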
pod" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" event={"ID":"36d8c637-bb40-4f4c-9fdf-56bbface6e35","Type":"ContainerDied","Data":"250e3e2e812a28ee2f95d9cfb7a437c0358a3e903f5ec3e2f834bf41771a2a34"} Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.694650 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" event={"ID":"36d8c637-bb40-4f4c-9fdf-56bbface6e35","Type":"ContainerStarted","Data":"4fafe168562b848b2c73cbe708be526fd2f5add22884d0e2c000b6b50346ea90"} Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.695230 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="dnsmasq-dns" containerID="cri-o://2c3e4b724197dfdb90ac79b0ce79621a159728d7efb063e5815ff454c7ce59a2" gracePeriod=10 Nov 25 17:03:04 crc kubenswrapper[4812]: I1125 17:03:04.695805 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="dnsmasq-dns" containerID="cri-o://c322f0f781e048f74377629c3131cf38e060528bbb191ccadf37f04dcc30ba7b" gracePeriod=10 Nov 25 17:03:05 crc kubenswrapper[4812]: I1125 17:03:05.704449 4812 generic.go:334] "Generic (PLEG): container finished" podID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerID="2c3e4b724197dfdb90ac79b0ce79621a159728d7efb063e5815ff454c7ce59a2" exitCode=0 Nov 25 17:03:05 crc kubenswrapper[4812]: I1125 17:03:05.704539 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" event={"ID":"2bd0bb6a-1a2f-4aac-87bc-d70993b16087","Type":"ContainerDied","Data":"2c3e4b724197dfdb90ac79b0ce79621a159728d7efb063e5815ff454c7ce59a2"} Nov 25 17:03:05 crc kubenswrapper[4812]: I1125 17:03:05.707034 4812 generic.go:334] "Generic (PLEG): container finished" podID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerID="c322f0f781e048f74377629c3131cf38e060528bbb191ccadf37f04dcc30ba7b" exitCode=0 Nov 25 17:03:05 crc kubenswrapper[4812]: I1125 17:03:05.707068 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" event={"ID":"6c5e797d-5b72-471a-bb61-329bd662a9ad","Type":"ContainerDied","Data":"c322f0f781e048f74377629c3131cf38e060528bbb191ccadf37f04dcc30ba7b"} Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.138740 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.101:5353: connect: connection refused" Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.534320 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.605554 4812 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.605554 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.622034 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-config\") pod \"6c5e797d-5b72-471a-bb61-329bd662a9ad\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") "
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.622118 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-dns-svc\") pod \"6c5e797d-5b72-471a-bb61-329bd662a9ad\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") "
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.622188 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jz2zs\" (UniqueName: \"kubernetes.io/projected/6c5e797d-5b72-471a-bb61-329bd662a9ad-kube-api-access-jz2zs\") pod \"6c5e797d-5b72-471a-bb61-329bd662a9ad\" (UID: \"6c5e797d-5b72-471a-bb61-329bd662a9ad\") "
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.622238 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bz56r\" (UniqueName: \"kubernetes.io/projected/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-kube-api-access-bz56r\") pod \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") "
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.622313 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-config\") pod \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") "
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.622342 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-dns-svc\") pod \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\" (UID: \"2bd0bb6a-1a2f-4aac-87bc-d70993b16087\") "
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.638685 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c5e797d-5b72-471a-bb61-329bd662a9ad-kube-api-access-jz2zs" (OuterVolumeSpecName: "kube-api-access-jz2zs") pod "6c5e797d-5b72-471a-bb61-329bd662a9ad" (UID: "6c5e797d-5b72-471a-bb61-329bd662a9ad"). InnerVolumeSpecName "kube-api-access-jz2zs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.648152 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-kube-api-access-bz56r" (OuterVolumeSpecName: "kube-api-access-bz56r") pod "2bd0bb6a-1a2f-4aac-87bc-d70993b16087" (UID: "2bd0bb6a-1a2f-4aac-87bc-d70993b16087"). InnerVolumeSpecName "kube-api-access-bz56r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.684708 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-config" (OuterVolumeSpecName: "config") pod "2bd0bb6a-1a2f-4aac-87bc-d70993b16087" (UID: "2bd0bb6a-1a2f-4aac-87bc-d70993b16087"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.687942 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2bd0bb6a-1a2f-4aac-87bc-d70993b16087" (UID: "2bd0bb6a-1a2f-4aac-87bc-d70993b16087"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.690868 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c5e797d-5b72-471a-bb61-329bd662a9ad" (UID: "6c5e797d-5b72-471a-bb61-329bd662a9ad"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.703711 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-config" (OuterVolumeSpecName: "config") pod "6c5e797d-5b72-471a-bb61-329bd662a9ad" (UID: "6c5e797d-5b72-471a-bb61-329bd662a9ad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.715083 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-psj48" event={"ID":"d6518e64-5e2f-4ad8-beba-0cf915d5b698","Type":"ContainerStarted","Data":"8613782295bd47f3f98f6bf0876e8d03d5378ac62f4a3b8caf64c2b0a5ea8afe"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.719900 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k" event={"ID":"2bd0bb6a-1a2f-4aac-87bc-d70993b16087","Type":"ContainerDied","Data":"1f88e505f0c34e6c56fe8f1975c3d56dcad1acad2b3480e07739824e64f323cf"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.719956 4812 scope.go:117] "RemoveContainer" containerID="2c3e4b724197dfdb90ac79b0ce79621a159728d7efb063e5815ff454c7ce59a2"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.719924 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-nt59k"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.723736 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.723762 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.723773 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.723781 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c5e797d-5b72-471a-bb61-329bd662a9ad-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.723792 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jz2zs\" (UniqueName: \"kubernetes.io/projected/6c5e797d-5b72-471a-bb61-329bd662a9ad-kube-api-access-jz2zs\") on node \"crc\" DevicePath \"\""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.723801 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bz56r\" (UniqueName: \"kubernetes.io/projected/2bd0bb6a-1a2f-4aac-87bc-d70993b16087-kube-api-access-bz56r\") on node \"crc\" DevicePath \"\""
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.724416 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"79c51471-1ba0-4dc3-927c-1057283fe10b","Type":"ContainerStarted","Data":"5ecc14279e787787476b1e7907a9ec36bf481ffa94f152f7b7e1f34b604231f2"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.733433 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qntcq" event={"ID":"86cceb9c-b87d-429f-8a66-8c6765fc4939","Type":"ContainerStarted","Data":"65f0e9ada0b080860a6de20841265fb1679ee8916ec1f849bd4cd7601be3a814"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.736907 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-psj48" podStartSLOduration=1.9847413120000001 podStartE2EDuration="4.736883055s" podCreationTimestamp="2025-11-25 17:03:02 +0000 UTC" firstStartedPulling="2025-11-25 17:03:03.53980828 +0000 UTC m=+958.379950375" lastFinishedPulling="2025-11-25 17:03:06.291950023 +0000 UTC m=+961.132092118" observedRunningTime="2025-11-25 17:03:06.73259678 +0000 UTC m=+961.572738875" watchObservedRunningTime="2025-11-25 17:03:06.736883055 +0000 UTC m=+961.577025150"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.738210 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" event={"ID":"36d8c637-bb40-4f4c-9fdf-56bbface6e35","Type":"ContainerStarted","Data":"f32ec7b1798fdb8f63086ef38771c208f7116c8f13241b5f1454dd0ba56cd7ad"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.738858 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.753910 4812 scope.go:117] "RemoveContainer" containerID="889c19ea5169978409ece33c4ba463dbfe5bcac4b24cc5822ad6e3aeb1986ab5"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.761707 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.842438888 podStartE2EDuration="15.761682827s" podCreationTimestamp="2025-11-25 17:02:51 +0000 UTC" firstStartedPulling="2025-11-25 17:02:56.431019213 +0000 UTC m=+951.271161308" lastFinishedPulling="2025-11-25 17:03:06.350263162 +0000 UTC m=+961.190405247" observedRunningTime="2025-11-25 17:03:06.754451664 +0000 UTC m=+961.594593759" watchObservedRunningTime="2025-11-25 17:03:06.761682827 +0000 UTC m=+961.601824942"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.766766 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" event={"ID":"e0f9d008-45d4-470b-b3f5-b4713a8730dd","Type":"ContainerStarted","Data":"32e0e4507b955f305c62c0c339a7ef214d4cddd6325e9baf88c34d4974bab7f7"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.766885 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.775442 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4e00d2f6-fe57-4a67-88e7-e1f101ba51da","Type":"ContainerStarted","Data":"fb10d6b347e8e406d2b336b98216afe8ada736215128a425b3d1c236562ddf9c"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.778036 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-nt59k"]
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.778625 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.778646 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" event={"ID":"6c5e797d-5b72-471a-bb61-329bd662a9ad","Type":"ContainerDied","Data":"021edd3c53d3822618b86bb48bfdc16256d74a26a50ee7389e4c3f16d82bcbf5"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.781475 4812 generic.go:334] "Generic (PLEG): container finished" podID="d25252bd-b678-4684-abe0-933dc4ac926e" containerID="920731ed5a78b4e1556a773b193f3ff6ca602a1122cb165b73335f27abd17d94" exitCode=0
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.781509 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d25252bd-b678-4684-abe0-933dc4ac926e","Type":"ContainerDied","Data":"920731ed5a78b4e1556a773b193f3ff6ca602a1122cb165b73335f27abd17d94"}
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.786906 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-nt59k"]
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.789822 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" podStartSLOduration=4.789803909 podStartE2EDuration="4.789803909s" podCreationTimestamp="2025-11-25 17:03:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:06.788584307 +0000 UTC m=+961.628726422" watchObservedRunningTime="2025-11-25 17:03:06.789803909 +0000 UTC m=+961.629945994"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.799171 4812 scope.go:117] "RemoveContainer" containerID="c322f0f781e048f74377629c3131cf38e060528bbb191ccadf37f04dcc30ba7b"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.845143 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" podStartSLOduration=4.8451272880000005 podStartE2EDuration="4.845127288s" podCreationTimestamp="2025-11-25 17:03:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:06.826752887 +0000 UTC m=+961.666894982" watchObservedRunningTime="2025-11-25 17:03:06.845127288 +0000 UTC m=+961.685269383"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.846832 4812 scope.go:117] "RemoveContainer" containerID="a95def92c669421e774e489d70351ec0f03da986b87d4fc55af787be5dc612c0"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.865821 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=4.937177681 podStartE2EDuration="16.865806971s" podCreationTimestamp="2025-11-25 17:02:50 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.358096694 +0000 UTC m=+949.198238789" lastFinishedPulling="2025-11-25 17:03:06.286725974 +0000 UTC m=+961.126868079" observedRunningTime="2025-11-25 17:03:06.860967691 +0000 UTC m=+961.701109786" watchObservedRunningTime="2025-11-25 17:03:06.865806971 +0000 UTC m=+961.705949066"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.883255 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-dbrmg"]
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.887436 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-dbrmg"]
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.912803 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.912863 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 25 17:03:06 crc kubenswrapper[4812]: I1125 17:03:06.948815 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.792281 4812 generic.go:334] "Generic (PLEG): container finished" podID="578466f7-9fe7-4e31-9006-58216401d68e" containerID="ff32ddb0c38d456f540dc680a84e6d64467eaab1721d66507c1a76dcfceff5f9" exitCode=0
Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.792647 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"578466f7-9fe7-4e31-9006-58216401d68e","Type":"ContainerDied","Data":"ff32ddb0c38d456f540dc680a84e6d64467eaab1721d66507c1a76dcfceff5f9"}
Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.798770 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-qntcq" event={"ID":"86cceb9c-b87d-429f-8a66-8c6765fc4939","Type":"ContainerStarted","Data":"4f58ff6ee2178dcdbece516b6417e1dabf7cbf6d9249869aae488825b04af920"}
Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.799724 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-qntcq"
Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.801510 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0"
event={"ID":"d25252bd-b678-4684-abe0-933dc4ac926e","Type":"ContainerStarted","Data":"92f1fcf5cec07f465effbb18a25d62108d4ba9e0909ace1e09b0c22e21473fba"} Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.850044 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=19.159897778 podStartE2EDuration="25.850022985s" podCreationTimestamp="2025-11-25 17:02:42 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.309000652 +0000 UTC m=+949.149142737" lastFinishedPulling="2025-11-25 17:03:00.999125849 +0000 UTC m=+955.839267944" observedRunningTime="2025-11-25 17:03:07.841060255 +0000 UTC m=+962.681202360" watchObservedRunningTime="2025-11-25 17:03:07.850022985 +0000 UTC m=+962.690165080" Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.852879 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" path="/var/lib/kubelet/pods/2bd0bb6a-1a2f-4aac-87bc-d70993b16087/volumes" Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.853646 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" path="/var/lib/kubelet/pods/6c5e797d-5b72-471a-bb61-329bd662a9ad/volumes" Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.855679 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 17:03:07 crc kubenswrapper[4812]: I1125 17:03:07.880403 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-qntcq" podStartSLOduration=12.096412302 podStartE2EDuration="16.880384336s" podCreationTimestamp="2025-11-25 17:02:51 +0000 UTC" firstStartedPulling="2025-11-25 17:02:56.427957832 +0000 UTC m=+951.268099927" lastFinishedPulling="2025-11-25 17:03:01.211929856 +0000 UTC m=+956.052071961" observedRunningTime="2025-11-25 17:03:07.863569126 +0000 UTC m=+962.703711231" watchObservedRunningTime="2025-11-25 17:03:07.880384336 +0000 UTC m=+962.720526431" Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.091047 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.091429 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.129470 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.813293 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"578466f7-9fe7-4e31-9006-58216401d68e","Type":"ContainerStarted","Data":"450d4f76c0457313fc1176b0acdf7a8fa5be057a5eb631f1efc44dc6e1232e01"} Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.814027 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.843695 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=17.940430647 podStartE2EDuration="24.84366453s" podCreationTimestamp="2025-11-25 17:02:44 +0000 UTC" firstStartedPulling="2025-11-25 17:02:54.308727314 +0000 UTC m=+949.148869409" lastFinishedPulling="2025-11-25 17:03:01.211961187 +0000 UTC m=+956.052103292" observedRunningTime="2025-11-25 17:03:08.830221721 
+0000 UTC m=+963.670363816" watchObservedRunningTime="2025-11-25 17:03:08.84366453 +0000 UTC m=+963.683806625" Nov 25 17:03:08 crc kubenswrapper[4812]: I1125 17:03:08.850863 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.017464 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 17:03:09 crc kubenswrapper[4812]: E1125 17:03:09.021904 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="dnsmasq-dns" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.022221 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="dnsmasq-dns" Nov 25 17:03:09 crc kubenswrapper[4812]: E1125 17:03:09.022296 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="init" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.022348 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="init" Nov 25 17:03:09 crc kubenswrapper[4812]: E1125 17:03:09.022478 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="init" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.022572 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="init" Nov 25 17:03:09 crc kubenswrapper[4812]: E1125 17:03:09.022645 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="dnsmasq-dns" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.022699 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="dnsmasq-dns" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.023132 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="dnsmasq-dns" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.023218 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bd0bb6a-1a2f-4aac-87bc-d70993b16087" containerName="dnsmasq-dns" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.024436 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.027185 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.032397 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.032906 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.033088 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-2k824" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.034228 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084147 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084268 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084327 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b67fcd17-39f7-4b26-be95-01a6223ec74d-config\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084502 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084599 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b67fcd17-39f7-4b26-be95-01a6223ec74d-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084691 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b67fcd17-39f7-4b26-be95-01a6223ec74d-scripts\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.084728 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsnd2\" (UniqueName: \"kubernetes.io/projected/b67fcd17-39f7-4b26-be95-01a6223ec74d-kube-api-access-lsnd2\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: 
I1125 17:03:09.186708 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b67fcd17-39f7-4b26-be95-01a6223ec74d-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.186788 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b67fcd17-39f7-4b26-be95-01a6223ec74d-scripts\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.186812 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsnd2\" (UniqueName: \"kubernetes.io/projected/b67fcd17-39f7-4b26-be95-01a6223ec74d-kube-api-access-lsnd2\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.186861 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.186889 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.186916 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b67fcd17-39f7-4b26-be95-01a6223ec74d-config\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.186962 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.187525 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b67fcd17-39f7-4b26-be95-01a6223ec74d-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.188270 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b67fcd17-39f7-4b26-be95-01a6223ec74d-scripts\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.188722 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b67fcd17-39f7-4b26-be95-01a6223ec74d-config\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.194188 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.194837 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.195177 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/b67fcd17-39f7-4b26-be95-01a6223ec74d-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.205855 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsnd2\" (UniqueName: \"kubernetes.io/projected/b67fcd17-39f7-4b26-be95-01a6223ec74d-kube-api-access-lsnd2\") pod \"ovn-northd-0\" (UID: \"b67fcd17-39f7-4b26-be95-01a6223ec74d\") " pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.351455 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.787551 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 17:03:09 crc kubenswrapper[4812]: I1125 17:03:09.822573 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b67fcd17-39f7-4b26-be95-01a6223ec74d","Type":"ContainerStarted","Data":"fa944a055dff95b72d4906cdf28d0c6cad1e5e33731a87376f01f261915f7148"} Nov 25 17:03:10 crc kubenswrapper[4812]: I1125 17:03:10.847200 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 17:03:11 crc kubenswrapper[4812]: I1125 17:03:11.449495 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-dbrmg" podUID="6c5e797d-5b72-471a-bb61-329bd662a9ad" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.102:5353: i/o timeout" Nov 25 17:03:13 crc kubenswrapper[4812]: I1125 17:03:13.177226 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" Nov 25 17:03:13 crc kubenswrapper[4812]: I1125 17:03:13.327679 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" Nov 25 17:03:13 crc kubenswrapper[4812]: I1125 17:03:13.383918 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-q2zpv"] Nov 25 17:03:13 crc kubenswrapper[4812]: I1125 17:03:13.848880 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerName="dnsmasq-dns" containerID="cri-o://f32ec7b1798fdb8f63086ef38771c208f7116c8f13241b5f1454dd0ba56cd7ad" gracePeriod=10 Nov 25 17:03:13 crc kubenswrapper[4812]: I1125 17:03:13.986664 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 17:03:13 crc 
kubenswrapper[4812]: I1125 17:03:13.987044 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 17:03:14 crc kubenswrapper[4812]: I1125 17:03:14.073649 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 17:03:14 crc kubenswrapper[4812]: I1125 17:03:14.858676 4812 generic.go:334] "Generic (PLEG): container finished" podID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerID="f32ec7b1798fdb8f63086ef38771c208f7116c8f13241b5f1454dd0ba56cd7ad" exitCode=0 Nov 25 17:03:14 crc kubenswrapper[4812]: I1125 17:03:14.858984 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" event={"ID":"36d8c637-bb40-4f4c-9fdf-56bbface6e35","Type":"ContainerDied","Data":"f32ec7b1798fdb8f63086ef38771c208f7116c8f13241b5f1454dd0ba56cd7ad"} Nov 25 17:03:14 crc kubenswrapper[4812]: I1125 17:03:14.931202 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.472976 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-d799-account-create-f4x79"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.474071 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.476463 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.484009 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d799-account-create-f4x79"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.537217 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-g9jm8"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.539131 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.546040 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-g9jm8"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.589430 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chvvd\" (UniqueName: \"kubernetes.io/projected/8286a549-6491-4255-9332-282eb9297c35-kube-api-access-chvvd\") pod \"keystone-d799-account-create-f4x79\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.589679 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqkv8\" (UniqueName: \"kubernetes.io/projected/908bb661-4226-4cb0-8527-b9a93f6048e1-kube-api-access-zqkv8\") pod \"keystone-db-create-g9jm8\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.589779 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8286a549-6491-4255-9332-282eb9297c35-operator-scripts\") pod \"keystone-d799-account-create-f4x79\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.589853 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/908bb661-4226-4cb0-8527-b9a93f6048e1-operator-scripts\") pod \"keystone-db-create-g9jm8\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.668190 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.668245 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.691816 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqkv8\" (UniqueName: \"kubernetes.io/projected/908bb661-4226-4cb0-8527-b9a93f6048e1-kube-api-access-zqkv8\") pod \"keystone-db-create-g9jm8\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.691875 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8286a549-6491-4255-9332-282eb9297c35-operator-scripts\") pod \"keystone-d799-account-create-f4x79\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.691906 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/908bb661-4226-4cb0-8527-b9a93f6048e1-operator-scripts\") pod \"keystone-db-create-g9jm8\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.692033 4812 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-chvvd\" (UniqueName: \"kubernetes.io/projected/8286a549-6491-4255-9332-282eb9297c35-kube-api-access-chvvd\") pod \"keystone-d799-account-create-f4x79\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.692886 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8286a549-6491-4255-9332-282eb9297c35-operator-scripts\") pod \"keystone-d799-account-create-f4x79\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.693434 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/908bb661-4226-4cb0-8527-b9a93f6048e1-operator-scripts\") pod \"keystone-db-create-g9jm8\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.711452 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chvvd\" (UniqueName: \"kubernetes.io/projected/8286a549-6491-4255-9332-282eb9297c35-kube-api-access-chvvd\") pod \"keystone-d799-account-create-f4x79\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.715486 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqkv8\" (UniqueName: \"kubernetes.io/projected/908bb661-4226-4cb0-8527-b9a93f6048e1-kube-api-access-zqkv8\") pod \"keystone-db-create-g9jm8\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.791151 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.856799 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.867875 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-z45dw"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.872870 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z45dw" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.898652 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-z45dw"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.983075 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7b45-account-create-shvlr"] Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.984836 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.987271 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 17:03:15 crc kubenswrapper[4812]: I1125 17:03:15.989925 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7b45-account-create-shvlr"] Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:15.997931 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58jf7\" (UniqueName: \"kubernetes.io/projected/0489df11-52f0-4093-9210-e621ba69425c-kube-api-access-58jf7\") pod \"placement-db-create-z45dw\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:15.998076 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0489df11-52f0-4093-9210-e621ba69425c-operator-scripts\") pod \"placement-db-create-z45dw\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.099622 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d390e826-1601-43d3-bbf2-1865db1b963f-operator-scripts\") pod \"placement-7b45-account-create-shvlr\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.099743 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0489df11-52f0-4093-9210-e621ba69425c-operator-scripts\") pod \"placement-db-create-z45dw\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.099798 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58jf7\" (UniqueName: \"kubernetes.io/projected/0489df11-52f0-4093-9210-e621ba69425c-kube-api-access-58jf7\") pod \"placement-db-create-z45dw\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.099868 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvcsm\" (UniqueName: \"kubernetes.io/projected/d390e826-1601-43d3-bbf2-1865db1b963f-kube-api-access-cvcsm\") pod \"placement-7b45-account-create-shvlr\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.100422 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0489df11-52f0-4093-9210-e621ba69425c-operator-scripts\") pod \"placement-db-create-z45dw\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.115230 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58jf7\" (UniqueName: \"kubernetes.io/projected/0489df11-52f0-4093-9210-e621ba69425c-kube-api-access-58jf7\") pod 
\"placement-db-create-z45dw\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.200923 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d390e826-1601-43d3-bbf2-1865db1b963f-operator-scripts\") pod \"placement-7b45-account-create-shvlr\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.201098 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvcsm\" (UniqueName: \"kubernetes.io/projected/d390e826-1601-43d3-bbf2-1865db1b963f-kube-api-access-cvcsm\") pod \"placement-7b45-account-create-shvlr\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.202038 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d390e826-1601-43d3-bbf2-1865db1b963f-operator-scripts\") pod \"placement-7b45-account-create-shvlr\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.216783 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvcsm\" (UniqueName: \"kubernetes.io/projected/d390e826-1601-43d3-bbf2-1865db1b963f-kube-api-access-cvcsm\") pod \"placement-7b45-account-create-shvlr\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.246973 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z45dw" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.268498 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d799-account-create-f4x79"] Nov 25 17:03:16 crc kubenswrapper[4812]: W1125 17:03:16.277470 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8286a549_6491_4255_9332_282eb9297c35.slice/crio-cc1280dfcbd4c7025cced7d969dac8bd5f4268cc926e42c41c5e8a675b2af643 WatchSource:0}: Error finding container cc1280dfcbd4c7025cced7d969dac8bd5f4268cc926e42c41c5e8a675b2af643: Status 404 returned error can't find the container with id cc1280dfcbd4c7025cced7d969dac8bd5f4268cc926e42c41c5e8a675b2af643 Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.310451 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.348118 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-g9jm8"] Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.684007 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-z45dw"] Nov 25 17:03:16 crc kubenswrapper[4812]: W1125 17:03:16.687345 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0489df11_52f0_4093_9210_e621ba69425c.slice/crio-c9f49d5d29792347470919139ed239006281607e677d314a62fa81ab3d49694d WatchSource:0}: Error finding container c9f49d5d29792347470919139ed239006281607e677d314a62fa81ab3d49694d: Status 404 returned error can't find the container with id c9f49d5d29792347470919139ed239006281607e677d314a62fa81ab3d49694d Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.759476 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7b45-account-create-shvlr"] Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.898679 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d799-account-create-f4x79" event={"ID":"8286a549-6491-4255-9332-282eb9297c35","Type":"ContainerStarted","Data":"0f89af6705b07691384a44ad4082cc1f45f2881207fa4382a17d32d394e75ed1"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.898743 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d799-account-create-f4x79" event={"ID":"8286a549-6491-4255-9332-282eb9297c35","Type":"ContainerStarted","Data":"cc1280dfcbd4c7025cced7d969dac8bd5f4268cc926e42c41c5e8a675b2af643"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.908482 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b45-account-create-shvlr" event={"ID":"d390e826-1601-43d3-bbf2-1865db1b963f","Type":"ContainerStarted","Data":"13ab83750ec737fbfa9db4dad5e8f4fbdbfea209bdf9c811bbfba6c9f728c09d"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.910800 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-g9jm8" event={"ID":"908bb661-4226-4cb0-8527-b9a93f6048e1","Type":"ContainerStarted","Data":"fc3b21a12b6fac380cbb329b165e6ab778cb58791d7974511dbe80091c62f451"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.910824 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-g9jm8" event={"ID":"908bb661-4226-4cb0-8527-b9a93f6048e1","Type":"ContainerStarted","Data":"a91680d5373cf4e276774a5c1a4ed8d777db216942ee30bb4a17431d15e0cc46"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.919612 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-d799-account-create-f4x79" podStartSLOduration=1.919591534 podStartE2EDuration="1.919591534s" podCreationTimestamp="2025-11-25 17:03:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:16.91420142 +0000 UTC m=+971.754343525" watchObservedRunningTime="2025-11-25 17:03:16.919591534 +0000 UTC m=+971.759733629" Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.932572 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"b67fcd17-39f7-4b26-be95-01a6223ec74d","Type":"ContainerStarted","Data":"7bd89b88d07eb716ae005ef1456c5e43a17625667beac67ed3346dafda8343d1"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.934413 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z45dw" event={"ID":"0489df11-52f0-4093-9210-e621ba69425c","Type":"ContainerStarted","Data":"c9f49d5d29792347470919139ed239006281607e677d314a62fa81ab3d49694d"} Nov 25 17:03:16 crc kubenswrapper[4812]: I1125 17:03:16.937375 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-g9jm8" podStartSLOduration=1.9373538689999998 podStartE2EDuration="1.937353869s" podCreationTimestamp="2025-11-25 17:03:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:16.929346145 +0000 UTC m=+971.769488240" watchObservedRunningTime="2025-11-25 17:03:16.937353869 +0000 UTC m=+971.777495964" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.022185 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.114995 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-config\") pod \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.115045 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzrrx\" (UniqueName: \"kubernetes.io/projected/36d8c637-bb40-4f4c-9fdf-56bbface6e35-kube-api-access-vzrrx\") pod \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.115103 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-ovsdbserver-nb\") pod \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.115121 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-dns-svc\") pod \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\" (UID: \"36d8c637-bb40-4f4c-9fdf-56bbface6e35\") " Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.121209 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36d8c637-bb40-4f4c-9fdf-56bbface6e35-kube-api-access-vzrrx" (OuterVolumeSpecName: "kube-api-access-vzrrx") pod "36d8c637-bb40-4f4c-9fdf-56bbface6e35" (UID: "36d8c637-bb40-4f4c-9fdf-56bbface6e35"). InnerVolumeSpecName "kube-api-access-vzrrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.156821 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-config" (OuterVolumeSpecName: "config") pod "36d8c637-bb40-4f4c-9fdf-56bbface6e35" (UID: "36d8c637-bb40-4f4c-9fdf-56bbface6e35"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.157604 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "36d8c637-bb40-4f4c-9fdf-56bbface6e35" (UID: "36d8c637-bb40-4f4c-9fdf-56bbface6e35"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.159615 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "36d8c637-bb40-4f4c-9fdf-56bbface6e35" (UID: "36d8c637-bb40-4f4c-9fdf-56bbface6e35"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.216817 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.216852 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzrrx\" (UniqueName: \"kubernetes.io/projected/36d8c637-bb40-4f4c-9fdf-56bbface6e35-kube-api-access-vzrrx\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.216866 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.216875 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/36d8c637-bb40-4f4c-9fdf-56bbface6e35-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.282195 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.345058 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.539464 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.944108 4812 generic.go:334] "Generic (PLEG): container finished" podID="d390e826-1601-43d3-bbf2-1865db1b963f" containerID="5b005913fa94458146c1e7a30fa43a961c9fa4907793a88d4e5c5e79f095304d" exitCode=0 Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.944165 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b45-account-create-shvlr" event={"ID":"d390e826-1601-43d3-bbf2-1865db1b963f","Type":"ContainerDied","Data":"5b005913fa94458146c1e7a30fa43a961c9fa4907793a88d4e5c5e79f095304d"} Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.945927 4812 generic.go:334] "Generic (PLEG): container finished" podID="908bb661-4226-4cb0-8527-b9a93f6048e1" containerID="fc3b21a12b6fac380cbb329b165e6ab778cb58791d7974511dbe80091c62f451" exitCode=0 Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.945999 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-g9jm8" 
event={"ID":"908bb661-4226-4cb0-8527-b9a93f6048e1","Type":"ContainerDied","Data":"fc3b21a12b6fac380cbb329b165e6ab778cb58791d7974511dbe80091c62f451"} Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.947704 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"b67fcd17-39f7-4b26-be95-01a6223ec74d","Type":"ContainerStarted","Data":"03c0448b2e1b5737de7d3d1cd115b26019dd1926033ec136c0be5d7648bbbd3a"} Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.947852 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.949316 4812 generic.go:334] "Generic (PLEG): container finished" podID="0489df11-52f0-4093-9210-e621ba69425c" containerID="e21af590e7ad85ab22d50cbb7c8099cb44f8db9dd2224107c0c27007c406b501" exitCode=0 Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.949365 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z45dw" event={"ID":"0489df11-52f0-4093-9210-e621ba69425c","Type":"ContainerDied","Data":"e21af590e7ad85ab22d50cbb7c8099cb44f8db9dd2224107c0c27007c406b501"} Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.950774 4812 generic.go:334] "Generic (PLEG): container finished" podID="8286a549-6491-4255-9332-282eb9297c35" containerID="0f89af6705b07691384a44ad4082cc1f45f2881207fa4382a17d32d394e75ed1" exitCode=0 Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.950808 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d799-account-create-f4x79" event={"ID":"8286a549-6491-4255-9332-282eb9297c35","Type":"ContainerDied","Data":"0f89af6705b07691384a44ad4082cc1f45f2881207fa4382a17d32d394e75ed1"} Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.953393 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" event={"ID":"36d8c637-bb40-4f4c-9fdf-56bbface6e35","Type":"ContainerDied","Data":"4fafe168562b848b2c73cbe708be526fd2f5add22884d0e2c000b6b50346ea90"} Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.953446 4812 scope.go:117] "RemoveContainer" containerID="f32ec7b1798fdb8f63086ef38771c208f7116c8f13241b5f1454dd0ba56cd7ad" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.953447 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-q2zpv" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.976183 4812 scope.go:117] "RemoveContainer" containerID="250e3e2e812a28ee2f95d9cfb7a437c0358a3e903f5ec3e2f834bf41771a2a34" Nov 25 17:03:17 crc kubenswrapper[4812]: I1125 17:03:17.988848 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=6.721083768 podStartE2EDuration="9.988834501s" podCreationTimestamp="2025-11-25 17:03:08 +0000 UTC" firstStartedPulling="2025-11-25 17:03:09.793063433 +0000 UTC m=+964.633205528" lastFinishedPulling="2025-11-25 17:03:13.060814166 +0000 UTC m=+967.900956261" observedRunningTime="2025-11-25 17:03:17.987020233 +0000 UTC m=+972.827162338" watchObservedRunningTime="2025-11-25 17:03:17.988834501 +0000 UTC m=+972.828976596" Nov 25 17:03:18 crc kubenswrapper[4812]: I1125 17:03:18.058197 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-q2zpv"] Nov 25 17:03:18 crc kubenswrapper[4812]: I1125 17:03:18.070279 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-q2zpv"] Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.280096 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z45dw" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.408045 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0489df11-52f0-4093-9210-e621ba69425c-operator-scripts\") pod \"0489df11-52f0-4093-9210-e621ba69425c\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.408174 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58jf7\" (UniqueName: \"kubernetes.io/projected/0489df11-52f0-4093-9210-e621ba69425c-kube-api-access-58jf7\") pod \"0489df11-52f0-4093-9210-e621ba69425c\" (UID: \"0489df11-52f0-4093-9210-e621ba69425c\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.409270 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0489df11-52f0-4093-9210-e621ba69425c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0489df11-52f0-4093-9210-e621ba69425c" (UID: "0489df11-52f0-4093-9210-e621ba69425c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.413079 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.419631 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0489df11-52f0-4093-9210-e621ba69425c-kube-api-access-58jf7" (OuterVolumeSpecName: "kube-api-access-58jf7") pod "0489df11-52f0-4093-9210-e621ba69425c" (UID: "0489df11-52f0-4093-9210-e621ba69425c"). InnerVolumeSpecName "kube-api-access-58jf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.421294 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.455991 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.510042 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0489df11-52f0-4093-9210-e621ba69425c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.510612 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58jf7\" (UniqueName: \"kubernetes.io/projected/0489df11-52f0-4093-9210-e621ba69425c-kube-api-access-58jf7\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.611641 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d390e826-1601-43d3-bbf2-1865db1b963f-operator-scripts\") pod \"d390e826-1601-43d3-bbf2-1865db1b963f\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.611737 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8286a549-6491-4255-9332-282eb9297c35-operator-scripts\") pod \"8286a549-6491-4255-9332-282eb9297c35\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.611779 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chvvd\" (UniqueName: \"kubernetes.io/projected/8286a549-6491-4255-9332-282eb9297c35-kube-api-access-chvvd\") pod \"8286a549-6491-4255-9332-282eb9297c35\" (UID: \"8286a549-6491-4255-9332-282eb9297c35\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.611843 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvcsm\" (UniqueName: \"kubernetes.io/projected/d390e826-1601-43d3-bbf2-1865db1b963f-kube-api-access-cvcsm\") pod \"d390e826-1601-43d3-bbf2-1865db1b963f\" (UID: \"d390e826-1601-43d3-bbf2-1865db1b963f\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.611927 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqkv8\" (UniqueName: \"kubernetes.io/projected/908bb661-4226-4cb0-8527-b9a93f6048e1-kube-api-access-zqkv8\") pod \"908bb661-4226-4cb0-8527-b9a93f6048e1\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.611959 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/908bb661-4226-4cb0-8527-b9a93f6048e1-operator-scripts\") pod \"908bb661-4226-4cb0-8527-b9a93f6048e1\" (UID: \"908bb661-4226-4cb0-8527-b9a93f6048e1\") " Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.612201 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8286a549-6491-4255-9332-282eb9297c35-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8286a549-6491-4255-9332-282eb9297c35" (UID: "8286a549-6491-4255-9332-282eb9297c35"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.612199 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d390e826-1601-43d3-bbf2-1865db1b963f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d390e826-1601-43d3-bbf2-1865db1b963f" (UID: "d390e826-1601-43d3-bbf2-1865db1b963f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.612633 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/908bb661-4226-4cb0-8527-b9a93f6048e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "908bb661-4226-4cb0-8527-b9a93f6048e1" (UID: "908bb661-4226-4cb0-8527-b9a93f6048e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.615035 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d390e826-1601-43d3-bbf2-1865db1b963f-kube-api-access-cvcsm" (OuterVolumeSpecName: "kube-api-access-cvcsm") pod "d390e826-1601-43d3-bbf2-1865db1b963f" (UID: "d390e826-1601-43d3-bbf2-1865db1b963f"). InnerVolumeSpecName "kube-api-access-cvcsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.615662 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8286a549-6491-4255-9332-282eb9297c35-kube-api-access-chvvd" (OuterVolumeSpecName: "kube-api-access-chvvd") pod "8286a549-6491-4255-9332-282eb9297c35" (UID: "8286a549-6491-4255-9332-282eb9297c35"). InnerVolumeSpecName "kube-api-access-chvvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.616921 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/908bb661-4226-4cb0-8527-b9a93f6048e1-kube-api-access-zqkv8" (OuterVolumeSpecName: "kube-api-access-zqkv8") pod "908bb661-4226-4cb0-8527-b9a93f6048e1" (UID: "908bb661-4226-4cb0-8527-b9a93f6048e1"). InnerVolumeSpecName "kube-api-access-zqkv8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.713920 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvcsm\" (UniqueName: \"kubernetes.io/projected/d390e826-1601-43d3-bbf2-1865db1b963f-kube-api-access-cvcsm\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.713964 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqkv8\" (UniqueName: \"kubernetes.io/projected/908bb661-4226-4cb0-8527-b9a93f6048e1-kube-api-access-zqkv8\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.713977 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/908bb661-4226-4cb0-8527-b9a93f6048e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.713989 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d390e826-1601-43d3-bbf2-1865db1b963f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.714000 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8286a549-6491-4255-9332-282eb9297c35-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.714011 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chvvd\" (UniqueName: \"kubernetes.io/projected/8286a549-6491-4255-9332-282eb9297c35-kube-api-access-chvvd\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.846066 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" path="/var/lib/kubelet/pods/36d8c637-bb40-4f4c-9fdf-56bbface6e35/volumes" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.971080 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-g9jm8" event={"ID":"908bb661-4226-4cb0-8527-b9a93f6048e1","Type":"ContainerDied","Data":"a91680d5373cf4e276774a5c1a4ed8d777db216942ee30bb4a17431d15e0cc46"} Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.971134 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a91680d5373cf4e276774a5c1a4ed8d777db216942ee30bb4a17431d15e0cc46" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.971094 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-g9jm8" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.974353 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z45dw" event={"ID":"0489df11-52f0-4093-9210-e621ba69425c","Type":"ContainerDied","Data":"c9f49d5d29792347470919139ed239006281607e677d314a62fa81ab3d49694d"} Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.974403 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9f49d5d29792347470919139ed239006281607e677d314a62fa81ab3d49694d" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.974367 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-z45dw" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.977249 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d799-account-create-f4x79" event={"ID":"8286a549-6491-4255-9332-282eb9297c35","Type":"ContainerDied","Data":"cc1280dfcbd4c7025cced7d969dac8bd5f4268cc926e42c41c5e8a675b2af643"} Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.977418 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc1280dfcbd4c7025cced7d969dac8bd5f4268cc926e42c41c5e8a675b2af643" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.977262 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d799-account-create-f4x79" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.978752 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b45-account-create-shvlr" event={"ID":"d390e826-1601-43d3-bbf2-1865db1b963f","Type":"ContainerDied","Data":"13ab83750ec737fbfa9db4dad5e8f4fbdbfea209bdf9c811bbfba6c9f728c09d"} Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.978788 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13ab83750ec737fbfa9db4dad5e8f4fbdbfea209bdf9c811bbfba6c9f728c09d" Nov 25 17:03:19 crc kubenswrapper[4812]: I1125 17:03:19.978920 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7b45-account-create-shvlr" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.989286 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-9wkjm"] Nov 25 17:03:20 crc kubenswrapper[4812]: E1125 17:03:20.989990 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerName="dnsmasq-dns" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990005 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerName="dnsmasq-dns" Nov 25 17:03:20 crc kubenswrapper[4812]: E1125 17:03:20.990027 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d390e826-1601-43d3-bbf2-1865db1b963f" containerName="mariadb-account-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990035 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d390e826-1601-43d3-bbf2-1865db1b963f" containerName="mariadb-account-create" Nov 25 17:03:20 crc kubenswrapper[4812]: E1125 17:03:20.990056 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerName="init" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990063 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerName="init" Nov 25 17:03:20 crc kubenswrapper[4812]: E1125 17:03:20.990084 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0489df11-52f0-4093-9210-e621ba69425c" containerName="mariadb-database-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990092 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="0489df11-52f0-4093-9210-e621ba69425c" containerName="mariadb-database-create" Nov 25 17:03:20 crc kubenswrapper[4812]: E1125 17:03:20.990102 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8286a549-6491-4255-9332-282eb9297c35" containerName="mariadb-account-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990111 
4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8286a549-6491-4255-9332-282eb9297c35" containerName="mariadb-account-create" Nov 25 17:03:20 crc kubenswrapper[4812]: E1125 17:03:20.990120 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="908bb661-4226-4cb0-8527-b9a93f6048e1" containerName="mariadb-database-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990128 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="908bb661-4226-4cb0-8527-b9a93f6048e1" containerName="mariadb-database-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990325 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="36d8c637-bb40-4f4c-9fdf-56bbface6e35" containerName="dnsmasq-dns" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990342 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d390e826-1601-43d3-bbf2-1865db1b963f" containerName="mariadb-account-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990366 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="908bb661-4226-4cb0-8527-b9a93f6048e1" containerName="mariadb-database-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990378 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="0489df11-52f0-4093-9210-e621ba69425c" containerName="mariadb-database-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.990390 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8286a549-6491-4255-9332-282eb9297c35" containerName="mariadb-account-create" Nov 25 17:03:20 crc kubenswrapper[4812]: I1125 17:03:20.991004 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.000733 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9wkjm"] Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.033494 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrkc2\" (UniqueName: \"kubernetes.io/projected/45a126f9-1bc4-4142-aed5-170f467d104a-kube-api-access-jrkc2\") pod \"glance-db-create-9wkjm\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.033663 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45a126f9-1bc4-4142-aed5-170f467d104a-operator-scripts\") pod \"glance-db-create-9wkjm\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.134848 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45a126f9-1bc4-4142-aed5-170f467d104a-operator-scripts\") pod \"glance-db-create-9wkjm\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.134953 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrkc2\" (UniqueName: \"kubernetes.io/projected/45a126f9-1bc4-4142-aed5-170f467d104a-kube-api-access-jrkc2\") pod \"glance-db-create-9wkjm\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 
17:03:21.135583 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45a126f9-1bc4-4142-aed5-170f467d104a-operator-scripts\") pod \"glance-db-create-9wkjm\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.149788 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrkc2\" (UniqueName: \"kubernetes.io/projected/45a126f9-1bc4-4142-aed5-170f467d104a-kube-api-access-jrkc2\") pod \"glance-db-create-9wkjm\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.204274 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-da6c-account-create-nz8bg"] Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.205481 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.207498 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.221729 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-da6c-account-create-nz8bg"] Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.305653 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.338289 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfqj7\" (UniqueName: \"kubernetes.io/projected/7335e288-40ba-43f9-b713-ae2d0b6fea48-kube-api-access-qfqj7\") pod \"glance-da6c-account-create-nz8bg\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.338695 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7335e288-40ba-43f9-b713-ae2d0b6fea48-operator-scripts\") pod \"glance-da6c-account-create-nz8bg\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.443217 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfqj7\" (UniqueName: \"kubernetes.io/projected/7335e288-40ba-43f9-b713-ae2d0b6fea48-kube-api-access-qfqj7\") pod \"glance-da6c-account-create-nz8bg\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.443663 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7335e288-40ba-43f9-b713-ae2d0b6fea48-operator-scripts\") pod \"glance-da6c-account-create-nz8bg\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.444594 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7335e288-40ba-43f9-b713-ae2d0b6fea48-operator-scripts\") pod \"glance-da6c-account-create-nz8bg\" (UID: 
\"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.462266 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfqj7\" (UniqueName: \"kubernetes.io/projected/7335e288-40ba-43f9-b713-ae2d0b6fea48-kube-api-access-qfqj7\") pod \"glance-da6c-account-create-nz8bg\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.537398 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.733216 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-9wkjm"] Nov 25 17:03:21 crc kubenswrapper[4812]: W1125 17:03:21.736133 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45a126f9_1bc4_4142_aed5_170f467d104a.slice/crio-931dc63b9430f2f03bad1bcea09162614cb373cf927194219fd1e42a68bab5b8 WatchSource:0}: Error finding container 931dc63b9430f2f03bad1bcea09162614cb373cf927194219fd1e42a68bab5b8: Status 404 returned error can't find the container with id 931dc63b9430f2f03bad1bcea09162614cb373cf927194219fd1e42a68bab5b8 Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.939434 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-da6c-account-create-nz8bg"] Nov 25 17:03:21 crc kubenswrapper[4812]: W1125 17:03:21.945940 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7335e288_40ba_43f9_b713_ae2d0b6fea48.slice/crio-fa7ff06323d50b64d30582a852ccf55f31ddaac0590d518bab5be3721e5a5673 WatchSource:0}: Error finding container fa7ff06323d50b64d30582a852ccf55f31ddaac0590d518bab5be3721e5a5673: Status 404 returned error can't find the container with id fa7ff06323d50b64d30582a852ccf55f31ddaac0590d518bab5be3721e5a5673 Nov 25 17:03:21 crc kubenswrapper[4812]: I1125 17:03:21.996185 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-da6c-account-create-nz8bg" event={"ID":"7335e288-40ba-43f9-b713-ae2d0b6fea48","Type":"ContainerStarted","Data":"fa7ff06323d50b64d30582a852ccf55f31ddaac0590d518bab5be3721e5a5673"} Nov 25 17:03:22 crc kubenswrapper[4812]: I1125 17:03:21.997951 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9wkjm" event={"ID":"45a126f9-1bc4-4142-aed5-170f467d104a","Type":"ContainerStarted","Data":"76db7f2d4c1ce2bca264f19aa7ad15682e585bb1963fd8276e0622f501956be0"} Nov 25 17:03:22 crc kubenswrapper[4812]: I1125 17:03:21.998002 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9wkjm" event={"ID":"45a126f9-1bc4-4142-aed5-170f467d104a","Type":"ContainerStarted","Data":"931dc63b9430f2f03bad1bcea09162614cb373cf927194219fd1e42a68bab5b8"} Nov 25 17:03:22 crc kubenswrapper[4812]: I1125 17:03:22.016802 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-9wkjm" podStartSLOduration=2.016785879 podStartE2EDuration="2.016785879s" podCreationTimestamp="2025-11-25 17:03:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:22.011168558 +0000 UTC m=+976.851310653" watchObservedRunningTime="2025-11-25 
17:03:22.016785879 +0000 UTC m=+976.856927974" Nov 25 17:03:23 crc kubenswrapper[4812]: I1125 17:03:23.007632 4812 generic.go:334] "Generic (PLEG): container finished" podID="7335e288-40ba-43f9-b713-ae2d0b6fea48" containerID="c6af31ffe01216181fa5ea3dfda17123caeb566808fb13b76602fbf47d9210ca" exitCode=0 Nov 25 17:03:23 crc kubenswrapper[4812]: I1125 17:03:23.007709 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-da6c-account-create-nz8bg" event={"ID":"7335e288-40ba-43f9-b713-ae2d0b6fea48","Type":"ContainerDied","Data":"c6af31ffe01216181fa5ea3dfda17123caeb566808fb13b76602fbf47d9210ca"} Nov 25 17:03:23 crc kubenswrapper[4812]: I1125 17:03:23.009849 4812 generic.go:334] "Generic (PLEG): container finished" podID="45a126f9-1bc4-4142-aed5-170f467d104a" containerID="76db7f2d4c1ce2bca264f19aa7ad15682e585bb1963fd8276e0622f501956be0" exitCode=0 Nov 25 17:03:23 crc kubenswrapper[4812]: I1125 17:03:23.009882 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9wkjm" event={"ID":"45a126f9-1bc4-4142-aed5-170f467d104a","Type":"ContainerDied","Data":"76db7f2d4c1ce2bca264f19aa7ad15682e585bb1963fd8276e0622f501956be0"} Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.369356 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.377340 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.384904 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfqj7\" (UniqueName: \"kubernetes.io/projected/7335e288-40ba-43f9-b713-ae2d0b6fea48-kube-api-access-qfqj7\") pod \"7335e288-40ba-43f9-b713-ae2d0b6fea48\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.385032 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7335e288-40ba-43f9-b713-ae2d0b6fea48-operator-scripts\") pod \"7335e288-40ba-43f9-b713-ae2d0b6fea48\" (UID: \"7335e288-40ba-43f9-b713-ae2d0b6fea48\") " Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.385086 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrkc2\" (UniqueName: \"kubernetes.io/projected/45a126f9-1bc4-4142-aed5-170f467d104a-kube-api-access-jrkc2\") pod \"45a126f9-1bc4-4142-aed5-170f467d104a\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.385158 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45a126f9-1bc4-4142-aed5-170f467d104a-operator-scripts\") pod \"45a126f9-1bc4-4142-aed5-170f467d104a\" (UID: \"45a126f9-1bc4-4142-aed5-170f467d104a\") " Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.385849 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7335e288-40ba-43f9-b713-ae2d0b6fea48-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7335e288-40ba-43f9-b713-ae2d0b6fea48" (UID: "7335e288-40ba-43f9-b713-ae2d0b6fea48"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.385865 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45a126f9-1bc4-4142-aed5-170f467d104a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "45a126f9-1bc4-4142-aed5-170f467d104a" (UID: "45a126f9-1bc4-4142-aed5-170f467d104a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.390935 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7335e288-40ba-43f9-b713-ae2d0b6fea48-kube-api-access-qfqj7" (OuterVolumeSpecName: "kube-api-access-qfqj7") pod "7335e288-40ba-43f9-b713-ae2d0b6fea48" (UID: "7335e288-40ba-43f9-b713-ae2d0b6fea48"). InnerVolumeSpecName "kube-api-access-qfqj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.391058 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a126f9-1bc4-4142-aed5-170f467d104a-kube-api-access-jrkc2" (OuterVolumeSpecName: "kube-api-access-jrkc2") pod "45a126f9-1bc4-4142-aed5-170f467d104a" (UID: "45a126f9-1bc4-4142-aed5-170f467d104a"). InnerVolumeSpecName "kube-api-access-jrkc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.487687 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrkc2\" (UniqueName: \"kubernetes.io/projected/45a126f9-1bc4-4142-aed5-170f467d104a-kube-api-access-jrkc2\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.487732 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45a126f9-1bc4-4142-aed5-170f467d104a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.487746 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfqj7\" (UniqueName: \"kubernetes.io/projected/7335e288-40ba-43f9-b713-ae2d0b6fea48-kube-api-access-qfqj7\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:24 crc kubenswrapper[4812]: I1125 17:03:24.487760 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7335e288-40ba-43f9-b713-ae2d0b6fea48-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:25 crc kubenswrapper[4812]: I1125 17:03:25.047106 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-da6c-account-create-nz8bg" event={"ID":"7335e288-40ba-43f9-b713-ae2d0b6fea48","Type":"ContainerDied","Data":"fa7ff06323d50b64d30582a852ccf55f31ddaac0590d518bab5be3721e5a5673"} Nov 25 17:03:25 crc kubenswrapper[4812]: I1125 17:03:25.047505 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa7ff06323d50b64d30582a852ccf55f31ddaac0590d518bab5be3721e5a5673" Nov 25 17:03:25 crc kubenswrapper[4812]: I1125 17:03:25.047608 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-da6c-account-create-nz8bg" Nov 25 17:03:25 crc kubenswrapper[4812]: I1125 17:03:25.051052 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-9wkjm" event={"ID":"45a126f9-1bc4-4142-aed5-170f467d104a","Type":"ContainerDied","Data":"931dc63b9430f2f03bad1bcea09162614cb373cf927194219fd1e42a68bab5b8"} Nov 25 17:03:25 crc kubenswrapper[4812]: I1125 17:03:25.051155 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="931dc63b9430f2f03bad1bcea09162614cb373cf927194219fd1e42a68bab5b8" Nov 25 17:03:25 crc kubenswrapper[4812]: I1125 17:03:25.051270 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-9wkjm" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.351139 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-n9rp2"] Nov 25 17:03:26 crc kubenswrapper[4812]: E1125 17:03:26.351514 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a126f9-1bc4-4142-aed5-170f467d104a" containerName="mariadb-database-create" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.351546 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a126f9-1bc4-4142-aed5-170f467d104a" containerName="mariadb-database-create" Nov 25 17:03:26 crc kubenswrapper[4812]: E1125 17:03:26.351567 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7335e288-40ba-43f9-b713-ae2d0b6fea48" containerName="mariadb-account-create" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.351575 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7335e288-40ba-43f9-b713-ae2d0b6fea48" containerName="mariadb-account-create" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.351759 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a126f9-1bc4-4142-aed5-170f467d104a" containerName="mariadb-database-create" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.351778 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="7335e288-40ba-43f9-b713-ae2d0b6fea48" containerName="mariadb-account-create" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.352398 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.355970 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-9djh4" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.356044 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.358583 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-n9rp2"] Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.415280 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-config-data\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.415346 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-db-sync-config-data\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.415401 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krnrg\" (UniqueName: \"kubernetes.io/projected/9c9a938f-a4f9-4739-837a-928721d40a65-kube-api-access-krnrg\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.415425 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-combined-ca-bundle\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.516378 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-config-data\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.516441 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-db-sync-config-data\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.516486 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krnrg\" (UniqueName: \"kubernetes.io/projected/9c9a938f-a4f9-4739-837a-928721d40a65-kube-api-access-krnrg\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.516512 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-combined-ca-bundle\") pod 
\"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.521924 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-db-sync-config-data\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.522039 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-combined-ca-bundle\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.522052 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-config-data\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.533646 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krnrg\" (UniqueName: \"kubernetes.io/projected/9c9a938f-a4f9-4739-837a-928721d40a65-kube-api-access-krnrg\") pod \"glance-db-sync-n9rp2\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:26 crc kubenswrapper[4812]: I1125 17:03:26.682013 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:27 crc kubenswrapper[4812]: I1125 17:03:27.151891 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-n9rp2"] Nov 25 17:03:27 crc kubenswrapper[4812]: I1125 17:03:27.332800 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:03:27 crc kubenswrapper[4812]: I1125 17:03:27.332860 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:03:28 crc kubenswrapper[4812]: I1125 17:03:28.073225 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-n9rp2" event={"ID":"9c9a938f-a4f9-4739-837a-928721d40a65","Type":"ContainerStarted","Data":"19f26a842691392f67cddddd1a3f0a5e1e6cd722adfac8211f0332e8c2bb3d07"} Nov 25 17:03:29 crc kubenswrapper[4812]: I1125 17:03:29.408097 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 17:03:35 crc kubenswrapper[4812]: I1125 17:03:35.124674 4812 generic.go:334] "Generic (PLEG): container finished" podID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerID="b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d" exitCode=0 Nov 25 17:03:35 crc kubenswrapper[4812]: I1125 17:03:35.124769 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"171759d9-0ee5-4a7c-9548-f41d11f0c112","Type":"ContainerDied","Data":"b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d"} Nov 25 17:03:36 crc kubenswrapper[4812]: I1125 17:03:36.131906 4812 generic.go:334] "Generic (PLEG): container finished" podID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerID="4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb" exitCode=0 Nov 25 17:03:36 crc kubenswrapper[4812]: I1125 17:03:36.132001 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c7cd9664-97af-4900-a89e-ee5a790506c4","Type":"ContainerDied","Data":"4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb"} Nov 25 17:03:36 crc kubenswrapper[4812]: I1125 17:03:36.513957 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-f5lv8" podUID="d43f9e93-ab7b-4a2f-9446-21ab9721b39f" containerName="ovn-controller" probeResult="failure" output=< Nov 25 17:03:36 crc kubenswrapper[4812]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 17:03:36 crc kubenswrapper[4812]: > Nov 25 17:03:36 crc kubenswrapper[4812]: I1125 17:03:36.565946 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.142719 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-n9rp2" event={"ID":"9c9a938f-a4f9-4739-837a-928721d40a65","Type":"ContainerStarted","Data":"845dd128a87dccd086b78f9996bef637cada05bed2f95ace55818d08e138f102"} Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.147287 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c7cd9664-97af-4900-a89e-ee5a790506c4","Type":"ContainerStarted","Data":"d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e"} Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.147598 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.150645 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"171759d9-0ee5-4a7c-9548-f41d11f0c112","Type":"ContainerStarted","Data":"2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c"} Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.150923 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.194090 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-n9rp2" podStartSLOduration=1.764458262 podStartE2EDuration="11.194069936s" podCreationTimestamp="2025-11-25 17:03:26 +0000 UTC" firstStartedPulling="2025-11-25 17:03:27.16525176 +0000 UTC m=+982.005393855" lastFinishedPulling="2025-11-25 17:03:36.594863434 +0000 UTC m=+991.435005529" observedRunningTime="2025-11-25 17:03:37.16305663 +0000 UTC m=+992.003198735" watchObservedRunningTime="2025-11-25 17:03:37.194069936 +0000 UTC m=+992.034212021" Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.199108 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=49.667160936 podStartE2EDuration="57.199097871s" podCreationTimestamp="2025-11-25 17:02:40 +0000 UTC" firstStartedPulling="2025-11-25 17:02:53.286184466 
+0000 UTC m=+948.126326561" lastFinishedPulling="2025-11-25 17:03:00.818121401 +0000 UTC m=+955.658263496" observedRunningTime="2025-11-25 17:03:37.191913227 +0000 UTC m=+992.032055352" watchObservedRunningTime="2025-11-25 17:03:37.199097871 +0000 UTC m=+992.039239966" Nov 25 17:03:37 crc kubenswrapper[4812]: I1125 17:03:37.220679 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=49.314379649 podStartE2EDuration="56.220660992s" podCreationTimestamp="2025-11-25 17:02:41 +0000 UTC" firstStartedPulling="2025-11-25 17:02:53.911779546 +0000 UTC m=+948.751921651" lastFinishedPulling="2025-11-25 17:03:00.818060909 +0000 UTC m=+955.658202994" observedRunningTime="2025-11-25 17:03:37.218448002 +0000 UTC m=+992.058590117" watchObservedRunningTime="2025-11-25 17:03:37.220660992 +0000 UTC m=+992.060803087" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.508089 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-f5lv8" podUID="d43f9e93-ab7b-4a2f-9446-21ab9721b39f" containerName="ovn-controller" probeResult="failure" output=< Nov 25 17:03:41 crc kubenswrapper[4812]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 17:03:41 crc kubenswrapper[4812]: > Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.563256 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-qntcq" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.763434 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f5lv8-config-v9lz5"] Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.764768 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.767508 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.771532 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f5lv8-config-v9lz5"] Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.870467 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-scripts\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.871524 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.871673 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-log-ovn\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.871814 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8mjg\" (UniqueName: \"kubernetes.io/projected/d35bd775-9765-4a6d-900f-66c151ba1feb-kube-api-access-x8mjg\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.871934 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run-ovn\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.872057 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-additional-scripts\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.974174 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8mjg\" (UniqueName: \"kubernetes.io/projected/d35bd775-9765-4a6d-900f-66c151ba1feb-kube-api-access-x8mjg\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.974234 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-additional-scripts\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.974947 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run-ovn\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.975193 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run-ovn\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.975447 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-additional-scripts\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.975488 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-scripts\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.975840 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.975944 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.976049 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-log-ovn\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.976068 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-log-ovn\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.977756 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-scripts\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:41 crc kubenswrapper[4812]: I1125 17:03:41.998081 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8mjg\" (UniqueName: \"kubernetes.io/projected/d35bd775-9765-4a6d-900f-66c151ba1feb-kube-api-access-x8mjg\") pod \"ovn-controller-f5lv8-config-v9lz5\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:42 crc kubenswrapper[4812]: I1125 17:03:42.088582 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:42 crc kubenswrapper[4812]: I1125 17:03:42.512290 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f5lv8-config-v9lz5"] Nov 25 17:03:42 crc kubenswrapper[4812]: W1125 17:03:42.517244 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd35bd775_9765_4a6d_900f_66c151ba1feb.slice/crio-4265c240d78b2af8a3e07e40fe02d975fb4b3f5159ebc58b34b631de90721359 WatchSource:0}: Error finding container 4265c240d78b2af8a3e07e40fe02d975fb4b3f5159ebc58b34b631de90721359: Status 404 returned error can't find the container with id 4265c240d78b2af8a3e07e40fe02d975fb4b3f5159ebc58b34b631de90721359 Nov 25 17:03:43 crc kubenswrapper[4812]: I1125 17:03:43.224369 4812 generic.go:334] "Generic (PLEG): container finished" podID="d35bd775-9765-4a6d-900f-66c151ba1feb" containerID="44a485c037869abdd67ffb3cc99739c0a39f7d76ed02783e064cb84eed2b4f89" exitCode=0 Nov 25 17:03:43 crc kubenswrapper[4812]: I1125 17:03:43.224701 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8-config-v9lz5" event={"ID":"d35bd775-9765-4a6d-900f-66c151ba1feb","Type":"ContainerDied","Data":"44a485c037869abdd67ffb3cc99739c0a39f7d76ed02783e064cb84eed2b4f89"} Nov 25 17:03:43 crc kubenswrapper[4812]: I1125 17:03:43.224734 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8-config-v9lz5" event={"ID":"d35bd775-9765-4a6d-900f-66c151ba1feb","Type":"ContainerStarted","Data":"4265c240d78b2af8a3e07e40fe02d975fb4b3f5159ebc58b34b631de90721359"} Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.520451 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622054 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-log-ovn\") pod \"d35bd775-9765-4a6d-900f-66c151ba1feb\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622134 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run-ovn\") pod \"d35bd775-9765-4a6d-900f-66c151ba1feb\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622153 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run\") pod \"d35bd775-9765-4a6d-900f-66c151ba1feb\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622197 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-additional-scripts\") pod \"d35bd775-9765-4a6d-900f-66c151ba1feb\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622186 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d35bd775-9765-4a6d-900f-66c151ba1feb" (UID: "d35bd775-9765-4a6d-900f-66c151ba1feb"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622201 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d35bd775-9765-4a6d-900f-66c151ba1feb" (UID: "d35bd775-9765-4a6d-900f-66c151ba1feb"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622236 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8mjg\" (UniqueName: \"kubernetes.io/projected/d35bd775-9765-4a6d-900f-66c151ba1feb-kube-api-access-x8mjg\") pod \"d35bd775-9765-4a6d-900f-66c151ba1feb\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622266 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-scripts\") pod \"d35bd775-9765-4a6d-900f-66c151ba1feb\" (UID: \"d35bd775-9765-4a6d-900f-66c151ba1feb\") " Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622281 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run" (OuterVolumeSpecName: "var-run") pod "d35bd775-9765-4a6d-900f-66c151ba1feb" (UID: "d35bd775-9765-4a6d-900f-66c151ba1feb"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622556 4812 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622568 4812 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622578 4812 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d35bd775-9765-4a6d-900f-66c151ba1feb-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.622996 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d35bd775-9765-4a6d-900f-66c151ba1feb" (UID: "d35bd775-9765-4a6d-900f-66c151ba1feb"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.623310 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-scripts" (OuterVolumeSpecName: "scripts") pod "d35bd775-9765-4a6d-900f-66c151ba1feb" (UID: "d35bd775-9765-4a6d-900f-66c151ba1feb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.631531 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d35bd775-9765-4a6d-900f-66c151ba1feb-kube-api-access-x8mjg" (OuterVolumeSpecName: "kube-api-access-x8mjg") pod "d35bd775-9765-4a6d-900f-66c151ba1feb" (UID: "d35bd775-9765-4a6d-900f-66c151ba1feb"). InnerVolumeSpecName "kube-api-access-x8mjg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.723860 4812 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.723916 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8mjg\" (UniqueName: \"kubernetes.io/projected/d35bd775-9765-4a6d-900f-66c151ba1feb-kube-api-access-x8mjg\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:44 crc kubenswrapper[4812]: I1125 17:03:44.723930 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d35bd775-9765-4a6d-900f-66c151ba1feb-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.242931 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8-config-v9lz5" event={"ID":"d35bd775-9765-4a6d-900f-66c151ba1feb","Type":"ContainerDied","Data":"4265c240d78b2af8a3e07e40fe02d975fb4b3f5159ebc58b34b631de90721359"} Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.243272 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4265c240d78b2af8a3e07e40fe02d975fb4b3f5159ebc58b34b631de90721359" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.243041 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-v9lz5" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.614721 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-f5lv8-config-v9lz5"] Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.624136 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-f5lv8-config-v9lz5"] Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.723789 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f5lv8-config-x67qs"] Nov 25 17:03:45 crc kubenswrapper[4812]: E1125 17:03:45.724194 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35bd775-9765-4a6d-900f-66c151ba1feb" containerName="ovn-config" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.724219 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35bd775-9765-4a6d-900f-66c151ba1feb" containerName="ovn-config" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.724449 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d35bd775-9765-4a6d-900f-66c151ba1feb" containerName="ovn-config" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.725163 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.728775 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.738395 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f5lv8-config-x67qs"] Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.840854 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-scripts\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.840920 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn9pm\" (UniqueName: \"kubernetes.io/projected/abf7c512-3c29-499a-85f6-2746630689b4-kube-api-access-qn9pm\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.841417 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.841520 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d35bd775-9765-4a6d-900f-66c151ba1feb" path="/var/lib/kubelet/pods/d35bd775-9765-4a6d-900f-66c151ba1feb/volumes" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.841674 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-log-ovn\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.841863 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-additional-scripts\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.841904 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run-ovn\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944006 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-additional-scripts\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " 
pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944092 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run-ovn\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944168 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-scripts\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944218 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn9pm\" (UniqueName: \"kubernetes.io/projected/abf7c512-3c29-499a-85f6-2746630689b4-kube-api-access-qn9pm\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944262 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944321 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-log-ovn\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944498 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-log-ovn\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.944516 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run-ovn\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.945019 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.945323 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-additional-scripts\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " 
pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.946907 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-scripts\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:45 crc kubenswrapper[4812]: I1125 17:03:45.964008 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn9pm\" (UniqueName: \"kubernetes.io/projected/abf7c512-3c29-499a-85f6-2746630689b4-kube-api-access-qn9pm\") pod \"ovn-controller-f5lv8-config-x67qs\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:46 crc kubenswrapper[4812]: I1125 17:03:46.043388 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:46 crc kubenswrapper[4812]: I1125 17:03:46.483469 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f5lv8-config-x67qs"] Nov 25 17:03:46 crc kubenswrapper[4812]: W1125 17:03:46.504373 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabf7c512_3c29_499a_85f6_2746630689b4.slice/crio-d59c7044e8feba8588ba1119516208cb9d59276d1a1834dc6a76ff83baa8ef7b WatchSource:0}: Error finding container d59c7044e8feba8588ba1119516208cb9d59276d1a1834dc6a76ff83baa8ef7b: Status 404 returned error can't find the container with id d59c7044e8feba8588ba1119516208cb9d59276d1a1834dc6a76ff83baa8ef7b Nov 25 17:03:46 crc kubenswrapper[4812]: I1125 17:03:46.520554 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-f5lv8" Nov 25 17:03:47 crc kubenswrapper[4812]: I1125 17:03:47.260154 4812 generic.go:334] "Generic (PLEG): container finished" podID="abf7c512-3c29-499a-85f6-2746630689b4" containerID="76b92ddb195df309e4615329b555ae060e8431f41e64e19dc3d66afa9126f61d" exitCode=0 Nov 25 17:03:47 crc kubenswrapper[4812]: I1125 17:03:47.260203 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8-config-x67qs" event={"ID":"abf7c512-3c29-499a-85f6-2746630689b4","Type":"ContainerDied","Data":"76b92ddb195df309e4615329b555ae060e8431f41e64e19dc3d66afa9126f61d"} Nov 25 17:03:47 crc kubenswrapper[4812]: I1125 17:03:47.260483 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8-config-x67qs" event={"ID":"abf7c512-3c29-499a-85f6-2746630689b4","Type":"ContainerStarted","Data":"d59c7044e8feba8588ba1119516208cb9d59276d1a1834dc6a76ff83baa8ef7b"} Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.269036 4812 generic.go:334] "Generic (PLEG): container finished" podID="9c9a938f-a4f9-4739-837a-928721d40a65" containerID="845dd128a87dccd086b78f9996bef637cada05bed2f95ace55818d08e138f102" exitCode=0 Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.269164 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-n9rp2" event={"ID":"9c9a938f-a4f9-4739-837a-928721d40a65","Type":"ContainerDied","Data":"845dd128a87dccd086b78f9996bef637cada05bed2f95ace55818d08e138f102"} Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.550446 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.691841 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-log-ovn\") pod \"abf7c512-3c29-499a-85f6-2746630689b4\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.691917 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-additional-scripts\") pod \"abf7c512-3c29-499a-85f6-2746630689b4\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.691971 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "abf7c512-3c29-499a-85f6-2746630689b4" (UID: "abf7c512-3c29-499a-85f6-2746630689b4"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692583 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qn9pm\" (UniqueName: \"kubernetes.io/projected/abf7c512-3c29-499a-85f6-2746630689b4-kube-api-access-qn9pm\") pod \"abf7c512-3c29-499a-85f6-2746630689b4\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692657 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-scripts\") pod \"abf7c512-3c29-499a-85f6-2746630689b4\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692710 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run\") pod \"abf7c512-3c29-499a-85f6-2746630689b4\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692738 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run-ovn\") pod \"abf7c512-3c29-499a-85f6-2746630689b4\" (UID: \"abf7c512-3c29-499a-85f6-2746630689b4\") " Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692789 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "abf7c512-3c29-499a-85f6-2746630689b4" (UID: "abf7c512-3c29-499a-85f6-2746630689b4"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692827 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run" (OuterVolumeSpecName: "var-run") pod "abf7c512-3c29-499a-85f6-2746630689b4" (UID: "abf7c512-3c29-499a-85f6-2746630689b4"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.692879 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "abf7c512-3c29-499a-85f6-2746630689b4" (UID: "abf7c512-3c29-499a-85f6-2746630689b4"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.693177 4812 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.693201 4812 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.693211 4812 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/abf7c512-3c29-499a-85f6-2746630689b4-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.693220 4812 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.693455 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-scripts" (OuterVolumeSpecName: "scripts") pod "abf7c512-3c29-499a-85f6-2746630689b4" (UID: "abf7c512-3c29-499a-85f6-2746630689b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.697151 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abf7c512-3c29-499a-85f6-2746630689b4-kube-api-access-qn9pm" (OuterVolumeSpecName: "kube-api-access-qn9pm") pod "abf7c512-3c29-499a-85f6-2746630689b4" (UID: "abf7c512-3c29-499a-85f6-2746630689b4"). InnerVolumeSpecName "kube-api-access-qn9pm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.794065 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abf7c512-3c29-499a-85f6-2746630689b4-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:48 crc kubenswrapper[4812]: I1125 17:03:48.794107 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qn9pm\" (UniqueName: \"kubernetes.io/projected/abf7c512-3c29-499a-85f6-2746630689b4-kube-api-access-qn9pm\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.281725 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f5lv8-config-x67qs" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.281877 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f5lv8-config-x67qs" event={"ID":"abf7c512-3c29-499a-85f6-2746630689b4","Type":"ContainerDied","Data":"d59c7044e8feba8588ba1119516208cb9d59276d1a1834dc6a76ff83baa8ef7b"} Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.282072 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d59c7044e8feba8588ba1119516208cb9d59276d1a1834dc6a76ff83baa8ef7b" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.617554 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-f5lv8-config-x67qs"] Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.627486 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-f5lv8-config-x67qs"] Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.651022 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.808242 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krnrg\" (UniqueName: \"kubernetes.io/projected/9c9a938f-a4f9-4739-837a-928721d40a65-kube-api-access-krnrg\") pod \"9c9a938f-a4f9-4739-837a-928721d40a65\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.808356 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-config-data\") pod \"9c9a938f-a4f9-4739-837a-928721d40a65\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.808427 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-combined-ca-bundle\") pod \"9c9a938f-a4f9-4739-837a-928721d40a65\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.808573 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-db-sync-config-data\") pod \"9c9a938f-a4f9-4739-837a-928721d40a65\" (UID: \"9c9a938f-a4f9-4739-837a-928721d40a65\") " Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.813093 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "9c9a938f-a4f9-4739-837a-928721d40a65" (UID: "9c9a938f-a4f9-4739-837a-928721d40a65"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.813276 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c9a938f-a4f9-4739-837a-928721d40a65-kube-api-access-krnrg" (OuterVolumeSpecName: "kube-api-access-krnrg") pod "9c9a938f-a4f9-4739-837a-928721d40a65" (UID: "9c9a938f-a4f9-4739-837a-928721d40a65"). InnerVolumeSpecName "kube-api-access-krnrg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.830904 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c9a938f-a4f9-4739-837a-928721d40a65" (UID: "9c9a938f-a4f9-4739-837a-928721d40a65"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.841300 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abf7c512-3c29-499a-85f6-2746630689b4" path="/var/lib/kubelet/pods/abf7c512-3c29-499a-85f6-2746630689b4/volumes" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.848036 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-config-data" (OuterVolumeSpecName: "config-data") pod "9c9a938f-a4f9-4739-837a-928721d40a65" (UID: "9c9a938f-a4f9-4739-837a-928721d40a65"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.910178 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.910214 4812 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.910224 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krnrg\" (UniqueName: \"kubernetes.io/projected/9c9a938f-a4f9-4739-837a-928721d40a65-kube-api-access-krnrg\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:49 crc kubenswrapper[4812]: I1125 17:03:49.910236 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c9a938f-a4f9-4739-837a-928721d40a65-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.291232 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-n9rp2" event={"ID":"9c9a938f-a4f9-4739-837a-928721d40a65","Type":"ContainerDied","Data":"19f26a842691392f67cddddd1a3f0a5e1e6cd722adfac8211f0332e8c2bb3d07"} Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.291274 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19f26a842691392f67cddddd1a3f0a5e1e6cd722adfac8211f0332e8c2bb3d07" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.291310 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-n9rp2" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.615959 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-w8rxh"] Nov 25 17:03:50 crc kubenswrapper[4812]: E1125 17:03:50.616268 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c9a938f-a4f9-4739-837a-928721d40a65" containerName="glance-db-sync" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.616283 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c9a938f-a4f9-4739-837a-928721d40a65" containerName="glance-db-sync" Nov 25 17:03:50 crc kubenswrapper[4812]: E1125 17:03:50.616305 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abf7c512-3c29-499a-85f6-2746630689b4" containerName="ovn-config" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.616312 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="abf7c512-3c29-499a-85f6-2746630689b4" containerName="ovn-config" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.616456 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="abf7c512-3c29-499a-85f6-2746630689b4" containerName="ovn-config" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.616474 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c9a938f-a4f9-4739-837a-928721d40a65" containerName="glance-db-sync" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.617256 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.624203 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.624280 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.624307 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-w8rxh"] Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.624313 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdtzz\" (UniqueName: \"kubernetes.io/projected/07da6a02-0fa5-4f26-be21-ab68f365c412-kube-api-access-hdtzz\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.624544 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.624566 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-config\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.725780 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdtzz\" (UniqueName: \"kubernetes.io/projected/07da6a02-0fa5-4f26-be21-ab68f365c412-kube-api-access-hdtzz\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.725876 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.725897 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-config\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.725946 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.725997 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.726961 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.727212 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.727665 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-config\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.728006 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-dns-svc\") pod 
\"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.744726 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdtzz\" (UniqueName: \"kubernetes.io/projected/07da6a02-0fa5-4f26-be21-ab68f365c412-kube-api-access-hdtzz\") pod \"dnsmasq-dns-54f9b7b8d9-w8rxh\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:50 crc kubenswrapper[4812]: I1125 17:03:50.933587 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:51 crc kubenswrapper[4812]: I1125 17:03:51.444315 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-w8rxh"] Nov 25 17:03:51 crc kubenswrapper[4812]: W1125 17:03:51.449479 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07da6a02_0fa5_4f26_be21_ab68f365c412.slice/crio-6099ef8372e6eeaaf1e6f7430215f3ea4ec02689097b5b0c0e64f2fc497d4e67 WatchSource:0}: Error finding container 6099ef8372e6eeaaf1e6f7430215f3ea4ec02689097b5b0c0e64f2fc497d4e67: Status 404 returned error can't find the container with id 6099ef8372e6eeaaf1e6f7430215f3ea4ec02689097b5b0c0e64f2fc497d4e67 Nov 25 17:03:52 crc kubenswrapper[4812]: I1125 17:03:52.273703 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:03:52 crc kubenswrapper[4812]: I1125 17:03:52.306933 4812 generic.go:334] "Generic (PLEG): container finished" podID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerID="2c20c37cb7b174f00153733b5b0afffa924da1054c264659e6b9e3c913a1fa73" exitCode=0 Nov 25 17:03:52 crc kubenswrapper[4812]: I1125 17:03:52.306995 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" event={"ID":"07da6a02-0fa5-4f26-be21-ab68f365c412","Type":"ContainerDied","Data":"2c20c37cb7b174f00153733b5b0afffa924da1054c264659e6b9e3c913a1fa73"} Nov 25 17:03:52 crc kubenswrapper[4812]: I1125 17:03:52.307024 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" event={"ID":"07da6a02-0fa5-4f26-be21-ab68f365c412","Type":"ContainerStarted","Data":"6099ef8372e6eeaaf1e6f7430215f3ea4ec02689097b5b0c0e64f2fc497d4e67"} Nov 25 17:03:52 crc kubenswrapper[4812]: I1125 17:03:52.570713 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.318782 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" event={"ID":"07da6a02-0fa5-4f26-be21-ab68f365c412","Type":"ContainerStarted","Data":"b2592d6694eb11c675ad251d0732050581d4d07cbe5de16460b6cc2f94cf7c46"} Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.319181 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.348907 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podStartSLOduration=3.348887238 podStartE2EDuration="3.348887238s" podCreationTimestamp="2025-11-25 17:03:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 17:03:53.34490852 +0000 UTC m=+1008.185050625" watchObservedRunningTime="2025-11-25 17:03:53.348887238 +0000 UTC m=+1008.189029333" Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.981270 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-xkff4"] Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.982570 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.990800 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-61d1-account-create-znx74"] Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.991887 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:53 crc kubenswrapper[4812]: I1125 17:03:53.995158 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.009390 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xkff4"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.020243 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-61d1-account-create-znx74"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.077669 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl5mp\" (UniqueName: \"kubernetes.io/projected/28562fbc-1113-4840-a9cf-597672e44f69-kube-api-access-vl5mp\") pod \"cinder-db-create-xkff4\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.077738 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28562fbc-1113-4840-a9cf-597672e44f69-operator-scripts\") pod \"cinder-db-create-xkff4\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.090343 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-ndx2b"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.091704 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.102052 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ndx2b"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.178873 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl5mp\" (UniqueName: \"kubernetes.io/projected/28562fbc-1113-4840-a9cf-597672e44f69-kube-api-access-vl5mp\") pod \"cinder-db-create-xkff4\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.178925 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28562fbc-1113-4840-a9cf-597672e44f69-operator-scripts\") pod \"cinder-db-create-xkff4\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.178959 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-operator-scripts\") pod \"cinder-61d1-account-create-znx74\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.179042 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vltp\" (UniqueName: \"kubernetes.io/projected/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-kube-api-access-7vltp\") pod \"cinder-61d1-account-create-znx74\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.179967 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28562fbc-1113-4840-a9cf-597672e44f69-operator-scripts\") pod \"cinder-db-create-xkff4\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.185155 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-5a3f-account-create-mzlm9"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.186260 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.190019 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.203199 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5a3f-account-create-mzlm9"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.209218 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl5mp\" (UniqueName: \"kubernetes.io/projected/28562fbc-1113-4840-a9cf-597672e44f69-kube-api-access-vl5mp\") pod \"cinder-db-create-xkff4\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.271402 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-qpt4v"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.272941 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.274685 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.274907 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.275142 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-s426j" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.275379 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.284245 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qpt4v"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.285724 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9xwk\" (UniqueName: \"kubernetes.io/projected/f65975c6-56d0-456e-8c7c-ac900b682f94-kube-api-access-r9xwk\") pod \"barbican-5a3f-account-create-mzlm9\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.285875 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vltp\" (UniqueName: \"kubernetes.io/projected/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-kube-api-access-7vltp\") pod \"cinder-61d1-account-create-znx74\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.286024 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6baf467-334f-4b16-8460-f590e01d6f65-operator-scripts\") pod \"barbican-db-create-ndx2b\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.286150 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-operator-scripts\") pod \"cinder-61d1-account-create-znx74\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " 
pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.286210 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccnkl\" (UniqueName: \"kubernetes.io/projected/f6baf467-334f-4b16-8460-f590e01d6f65-kube-api-access-ccnkl\") pod \"barbican-db-create-ndx2b\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.286246 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f65975c6-56d0-456e-8c7c-ac900b682f94-operator-scripts\") pod \"barbican-5a3f-account-create-mzlm9\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.286937 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-operator-scripts\") pod \"cinder-61d1-account-create-znx74\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.303346 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xkff4" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.312326 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vltp\" (UniqueName: \"kubernetes.io/projected/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-kube-api-access-7vltp\") pod \"cinder-61d1-account-create-znx74\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.388880 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-qhkxm"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389370 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-combined-ca-bundle\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389423 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6baf467-334f-4b16-8460-f590e01d6f65-operator-scripts\") pod \"barbican-db-create-ndx2b\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389470 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-config-data\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389508 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccnkl\" (UniqueName: \"kubernetes.io/projected/f6baf467-334f-4b16-8460-f590e01d6f65-kube-api-access-ccnkl\") pod \"barbican-db-create-ndx2b\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " 
pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389563 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f65975c6-56d0-456e-8c7c-ac900b682f94-operator-scripts\") pod \"barbican-5a3f-account-create-mzlm9\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389592 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzn6g\" (UniqueName: \"kubernetes.io/projected/cc9bcdae-4537-427d-a3f1-064ae62d7b62-kube-api-access-pzn6g\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389609 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9xwk\" (UniqueName: \"kubernetes.io/projected/f65975c6-56d0-456e-8c7c-ac900b682f94-kube-api-access-r9xwk\") pod \"barbican-5a3f-account-create-mzlm9\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.389821 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.390677 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f65975c6-56d0-456e-8c7c-ac900b682f94-operator-scripts\") pod \"barbican-5a3f-account-create-mzlm9\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.390734 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6baf467-334f-4b16-8460-f590e01d6f65-operator-scripts\") pod \"barbican-db-create-ndx2b\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.406447 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qhkxm"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.411178 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccnkl\" (UniqueName: \"kubernetes.io/projected/f6baf467-334f-4b16-8460-f590e01d6f65-kube-api-access-ccnkl\") pod \"barbican-db-create-ndx2b\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.413891 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-ndx2b" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.431293 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9xwk\" (UniqueName: \"kubernetes.io/projected/f65975c6-56d0-456e-8c7c-ac900b682f94-kube-api-access-r9xwk\") pod \"barbican-5a3f-account-create-mzlm9\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.493988 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-combined-ca-bundle\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.494230 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c10632e3-be65-42c8-b358-b3ce41252b94-operator-scripts\") pod \"neutron-db-create-qhkxm\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.496102 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-config-data\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.496196 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzn6g\" (UniqueName: \"kubernetes.io/projected/cc9bcdae-4537-427d-a3f1-064ae62d7b62-kube-api-access-pzn6g\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.496320 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26tml\" (UniqueName: \"kubernetes.io/projected/c10632e3-be65-42c8-b358-b3ce41252b94-kube-api-access-26tml\") pod \"neutron-db-create-qhkxm\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.502758 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-config-data\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.503760 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-combined-ca-bundle\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.503915 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.513206 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-f4f4-account-create-zlgh5"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.514465 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.520328 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.539455 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f4f4-account-create-zlgh5"] Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.545210 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzn6g\" (UniqueName: \"kubernetes.io/projected/cc9bcdae-4537-427d-a3f1-064ae62d7b62-kube-api-access-pzn6g\") pod \"keystone-db-sync-qpt4v\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.596433 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.598147 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26tml\" (UniqueName: \"kubernetes.io/projected/c10632e3-be65-42c8-b358-b3ce41252b94-kube-api-access-26tml\") pod \"neutron-db-create-qhkxm\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.598178 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs4rc\" (UniqueName: \"kubernetes.io/projected/9a560774-b983-4fb0-b630-f9913688c130-kube-api-access-vs4rc\") pod \"neutron-f4f4-account-create-zlgh5\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.598236 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c10632e3-be65-42c8-b358-b3ce41252b94-operator-scripts\") pod \"neutron-db-create-qhkxm\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.598311 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a560774-b983-4fb0-b630-f9913688c130-operator-scripts\") pod \"neutron-f4f4-account-create-zlgh5\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.599354 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c10632e3-be65-42c8-b358-b3ce41252b94-operator-scripts\") pod \"neutron-db-create-qhkxm\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.613494 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.625269 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26tml\" (UniqueName: \"kubernetes.io/projected/c10632e3-be65-42c8-b358-b3ce41252b94-kube-api-access-26tml\") pod \"neutron-db-create-qhkxm\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.699695 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a560774-b983-4fb0-b630-f9913688c130-operator-scripts\") pod \"neutron-f4f4-account-create-zlgh5\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.699792 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs4rc\" (UniqueName: \"kubernetes.io/projected/9a560774-b983-4fb0-b630-f9913688c130-kube-api-access-vs4rc\") pod \"neutron-f4f4-account-create-zlgh5\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.700374 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a560774-b983-4fb0-b630-f9913688c130-operator-scripts\") pod \"neutron-f4f4-account-create-zlgh5\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.715452 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qhkxm" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.721462 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs4rc\" (UniqueName: \"kubernetes.io/projected/9a560774-b983-4fb0-b630-f9913688c130-kube-api-access-vs4rc\") pod \"neutron-f4f4-account-create-zlgh5\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.850282 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:03:54 crc kubenswrapper[4812]: I1125 17:03:54.975555 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xkff4"] Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.021547 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ndx2b"] Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.186579 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5a3f-account-create-mzlm9"] Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.274079 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qpt4v"] Nov 25 17:03:55 crc kubenswrapper[4812]: W1125 17:03:55.313391 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28562fbc_1113_4840_a9cf_597672e44f69.slice/crio-baaced276bdf665cb6d24fc56d76e05f9df9a938894633eb89ea24774f7606cd WatchSource:0}: Error finding container baaced276bdf665cb6d24fc56d76e05f9df9a938894633eb89ea24774f7606cd: Status 404 returned error can't find the container with id baaced276bdf665cb6d24fc56d76e05f9df9a938894633eb89ea24774f7606cd Nov 25 17:03:55 crc kubenswrapper[4812]: W1125 17:03:55.314335 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf65975c6_56d0_456e_8c7c_ac900b682f94.slice/crio-135bd19bba62f0762d3af9f8183a35517ad6a3651c605c3fda21c17ca0b4000c WatchSource:0}: Error finding container 135bd19bba62f0762d3af9f8183a35517ad6a3651c605c3fda21c17ca0b4000c: Status 404 returned error can't find the container with id 135bd19bba62f0762d3af9f8183a35517ad6a3651c605c3fda21c17ca0b4000c Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.329021 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-61d1-account-create-znx74"] Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.334756 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ndx2b" event={"ID":"f6baf467-334f-4b16-8460-f590e01d6f65","Type":"ContainerStarted","Data":"bd819db3dda14f488afbab1708950613f7b16849d1ff9d99906d23a257569dc7"} Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.336880 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpt4v" event={"ID":"cc9bcdae-4537-427d-a3f1-064ae62d7b62","Type":"ContainerStarted","Data":"ddc4650eb0369a66d4dda5260a138205d35a8ae5912e41443099b7bf7aee3791"} Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.338410 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xkff4" event={"ID":"28562fbc-1113-4840-a9cf-597672e44f69","Type":"ContainerStarted","Data":"baaced276bdf665cb6d24fc56d76e05f9df9a938894633eb89ea24774f7606cd"} Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.339342 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a3f-account-create-mzlm9" event={"ID":"f65975c6-56d0-456e-8c7c-ac900b682f94","Type":"ContainerStarted","Data":"135bd19bba62f0762d3af9f8183a35517ad6a3651c605c3fda21c17ca0b4000c"} Nov 25 17:03:55 crc kubenswrapper[4812]: W1125 17:03:55.574885 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod703cd6e3_90a5_4c10_9f88_d4faac8e24e0.slice/crio-65d6943a1f67153c0403a99761fcfd6d28995f63b774647f571f2be8b8d4babc WatchSource:0}: Error 
finding container 65d6943a1f67153c0403a99761fcfd6d28995f63b774647f571f2be8b8d4babc: Status 404 returned error can't find the container with id 65d6943a1f67153c0403a99761fcfd6d28995f63b774647f571f2be8b8d4babc Nov 25 17:03:55 crc kubenswrapper[4812]: I1125 17:03:55.744979 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qhkxm"] Nov 25 17:03:55 crc kubenswrapper[4812]: W1125 17:03:55.760173 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc10632e3_be65_42c8_b358_b3ce41252b94.slice/crio-9705e1874c8b2f8ba44de5fd082e79011a41734fa086941c0c2164b3b21d0aa2 WatchSource:0}: Error finding container 9705e1874c8b2f8ba44de5fd082e79011a41734fa086941c0c2164b3b21d0aa2: Status 404 returned error can't find the container with id 9705e1874c8b2f8ba44de5fd082e79011a41734fa086941c0c2164b3b21d0aa2 Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.101091 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f4f4-account-create-zlgh5"] Nov 25 17:03:56 crc kubenswrapper[4812]: W1125 17:03:56.104955 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a560774_b983_4fb0_b630_f9913688c130.slice/crio-3ea7336d7943aa97fd35801c02e557eb0c9c81af01dea64d38ff10f144bb56b1 WatchSource:0}: Error finding container 3ea7336d7943aa97fd35801c02e557eb0c9c81af01dea64d38ff10f144bb56b1: Status 404 returned error can't find the container with id 3ea7336d7943aa97fd35801c02e557eb0c9c81af01dea64d38ff10f144bb56b1 Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.354173 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qhkxm" event={"ID":"c10632e3-be65-42c8-b358-b3ce41252b94","Type":"ContainerStarted","Data":"4fa2c39f8ad70fc602ed1cd99aaaa438049bca404d0bde3c32d9f0525f10e667"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.354560 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qhkxm" event={"ID":"c10632e3-be65-42c8-b358-b3ce41252b94","Type":"ContainerStarted","Data":"9705e1874c8b2f8ba44de5fd082e79011a41734fa086941c0c2164b3b21d0aa2"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.357197 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-61d1-account-create-znx74" event={"ID":"703cd6e3-90a5-4c10-9f88-d4faac8e24e0","Type":"ContainerStarted","Data":"bb01911dc1d7950253380ece32fe4578f6aaeb8137d0f039a8a42d48d4fe820f"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.357223 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-61d1-account-create-znx74" event={"ID":"703cd6e3-90a5-4c10-9f88-d4faac8e24e0","Type":"ContainerStarted","Data":"65d6943a1f67153c0403a99761fcfd6d28995f63b774647f571f2be8b8d4babc"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.361835 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ndx2b" event={"ID":"f6baf467-334f-4b16-8460-f590e01d6f65","Type":"ContainerStarted","Data":"021f14bfccfbb38ca1b21a11feadfd808345ca858a903d9605536976645bdb6e"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.364352 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xkff4" event={"ID":"28562fbc-1113-4840-a9cf-597672e44f69","Type":"ContainerStarted","Data":"c5c80dc3c209e5141991b6c4de7812add55f88c1162c7e2772aa0b2f9bf77918"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 
17:03:56.370423 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f4f4-account-create-zlgh5" event={"ID":"9a560774-b983-4fb0-b630-f9913688c130","Type":"ContainerStarted","Data":"3ea7336d7943aa97fd35801c02e557eb0c9c81af01dea64d38ff10f144bb56b1"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.376283 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-qhkxm" podStartSLOduration=2.376258337 podStartE2EDuration="2.376258337s" podCreationTimestamp="2025-11-25 17:03:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:56.370026459 +0000 UTC m=+1011.210168554" watchObservedRunningTime="2025-11-25 17:03:56.376258337 +0000 UTC m=+1011.216400432" Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.378892 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a3f-account-create-mzlm9" event={"ID":"f65975c6-56d0-456e-8c7c-ac900b682f94","Type":"ContainerStarted","Data":"a75baceb301573ab7802e7e7cc3342ff61ca92b4781545fc53200578e03318ad"} Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.392013 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-ndx2b" podStartSLOduration=2.39197984 podStartE2EDuration="2.39197984s" podCreationTimestamp="2025-11-25 17:03:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:56.385401813 +0000 UTC m=+1011.225543938" watchObservedRunningTime="2025-11-25 17:03:56.39197984 +0000 UTC m=+1011.232121935" Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.409849 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-xkff4" podStartSLOduration=3.409827241 podStartE2EDuration="3.409827241s" podCreationTimestamp="2025-11-25 17:03:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:56.402356679 +0000 UTC m=+1011.242498774" watchObservedRunningTime="2025-11-25 17:03:56.409827241 +0000 UTC m=+1011.249969336" Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.428090 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-61d1-account-create-znx74" podStartSLOduration=3.4280726120000002 podStartE2EDuration="3.428072612s" podCreationTimestamp="2025-11-25 17:03:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:56.423133169 +0000 UTC m=+1011.263275264" watchObservedRunningTime="2025-11-25 17:03:56.428072612 +0000 UTC m=+1011.268214707" Nov 25 17:03:56 crc kubenswrapper[4812]: I1125 17:03:56.441371 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-5a3f-account-create-mzlm9" podStartSLOduration=2.441350089 podStartE2EDuration="2.441350089s" podCreationTimestamp="2025-11-25 17:03:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:03:56.435351348 +0000 UTC m=+1011.275493453" watchObservedRunningTime="2025-11-25 17:03:56.441350089 +0000 UTC m=+1011.281492184" Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.334074 4812 patch_prober.go:28] interesting 
pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.334141 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.388873 4812 generic.go:334] "Generic (PLEG): container finished" podID="703cd6e3-90a5-4c10-9f88-d4faac8e24e0" containerID="bb01911dc1d7950253380ece32fe4578f6aaeb8137d0f039a8a42d48d4fe820f" exitCode=0 Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.389283 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-61d1-account-create-znx74" event={"ID":"703cd6e3-90a5-4c10-9f88-d4faac8e24e0","Type":"ContainerDied","Data":"bb01911dc1d7950253380ece32fe4578f6aaeb8137d0f039a8a42d48d4fe820f"} Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.392437 4812 generic.go:334] "Generic (PLEG): container finished" podID="f6baf467-334f-4b16-8460-f590e01d6f65" containerID="021f14bfccfbb38ca1b21a11feadfd808345ca858a903d9605536976645bdb6e" exitCode=0 Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.392502 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ndx2b" event={"ID":"f6baf467-334f-4b16-8460-f590e01d6f65","Type":"ContainerDied","Data":"021f14bfccfbb38ca1b21a11feadfd808345ca858a903d9605536976645bdb6e"} Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.394294 4812 generic.go:334] "Generic (PLEG): container finished" podID="28562fbc-1113-4840-a9cf-597672e44f69" containerID="c5c80dc3c209e5141991b6c4de7812add55f88c1162c7e2772aa0b2f9bf77918" exitCode=0 Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.394357 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xkff4" event={"ID":"28562fbc-1113-4840-a9cf-597672e44f69","Type":"ContainerDied","Data":"c5c80dc3c209e5141991b6c4de7812add55f88c1162c7e2772aa0b2f9bf77918"} Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.395635 4812 generic.go:334] "Generic (PLEG): container finished" podID="9a560774-b983-4fb0-b630-f9913688c130" containerID="9aa82f4b758e0c1d98cd7c211d55a1373488e8f2b352f694a96993aad2d8aab8" exitCode=0 Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.395683 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f4f4-account-create-zlgh5" event={"ID":"9a560774-b983-4fb0-b630-f9913688c130","Type":"ContainerDied","Data":"9aa82f4b758e0c1d98cd7c211d55a1373488e8f2b352f694a96993aad2d8aab8"} Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.397245 4812 generic.go:334] "Generic (PLEG): container finished" podID="f65975c6-56d0-456e-8c7c-ac900b682f94" containerID="a75baceb301573ab7802e7e7cc3342ff61ca92b4781545fc53200578e03318ad" exitCode=0 Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.397317 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a3f-account-create-mzlm9" event={"ID":"f65975c6-56d0-456e-8c7c-ac900b682f94","Type":"ContainerDied","Data":"a75baceb301573ab7802e7e7cc3342ff61ca92b4781545fc53200578e03318ad"} Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.400350 
4812 generic.go:334] "Generic (PLEG): container finished" podID="c10632e3-be65-42c8-b358-b3ce41252b94" containerID="4fa2c39f8ad70fc602ed1cd99aaaa438049bca404d0bde3c32d9f0525f10e667" exitCode=0 Nov 25 17:03:57 crc kubenswrapper[4812]: I1125 17:03:57.400387 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qhkxm" event={"ID":"c10632e3-be65-42c8-b358-b3ce41252b94","Type":"ContainerDied","Data":"4fa2c39f8ad70fc602ed1cd99aaaa438049bca404d0bde3c32d9f0525f10e667"} Nov 25 17:04:00 crc kubenswrapper[4812]: I1125 17:04:00.935521 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:04:00 crc kubenswrapper[4812]: I1125 17:04:00.999565 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-6tr4l"] Nov 25 17:04:01 crc kubenswrapper[4812]: I1125 17:04:01.000071 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerName="dnsmasq-dns" containerID="cri-o://32e0e4507b955f305c62c0c339a7ef214d4cddd6325e9baf88c34d4974bab7f7" gracePeriod=10 Nov 25 17:04:01 crc kubenswrapper[4812]: I1125 17:04:01.964695 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:04:01 crc kubenswrapper[4812]: I1125 17:04:01.987134 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:04:01 crc kubenswrapper[4812]: I1125 17:04:01.993047 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qhkxm" Nov 25 17:04:01 crc kubenswrapper[4812]: I1125 17:04:01.997917 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.017774 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xkff4" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.035770 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ndx2b" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.063302 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-operator-scripts\") pod \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.063740 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vltp\" (UniqueName: \"kubernetes.io/projected/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-kube-api-access-7vltp\") pod \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\" (UID: \"703cd6e3-90a5-4c10-9f88-d4faac8e24e0\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.065278 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "703cd6e3-90a5-4c10-9f88-d4faac8e24e0" (UID: "703cd6e3-90a5-4c10-9f88-d4faac8e24e0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.071349 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-kube-api-access-7vltp" (OuterVolumeSpecName: "kube-api-access-7vltp") pod "703cd6e3-90a5-4c10-9f88-d4faac8e24e0" (UID: "703cd6e3-90a5-4c10-9f88-d4faac8e24e0"). InnerVolumeSpecName "kube-api-access-7vltp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.165710 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs4rc\" (UniqueName: \"kubernetes.io/projected/9a560774-b983-4fb0-b630-f9913688c130-kube-api-access-vs4rc\") pod \"9a560774-b983-4fb0-b630-f9913688c130\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.165805 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccnkl\" (UniqueName: \"kubernetes.io/projected/f6baf467-334f-4b16-8460-f590e01d6f65-kube-api-access-ccnkl\") pod \"f6baf467-334f-4b16-8460-f590e01d6f65\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.165844 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28562fbc-1113-4840-a9cf-597672e44f69-operator-scripts\") pod \"28562fbc-1113-4840-a9cf-597672e44f69\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.165864 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a560774-b983-4fb0-b630-f9913688c130-operator-scripts\") pod \"9a560774-b983-4fb0-b630-f9913688c130\" (UID: \"9a560774-b983-4fb0-b630-f9913688c130\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.165885 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6baf467-334f-4b16-8460-f590e01d6f65-operator-scripts\") pod \"f6baf467-334f-4b16-8460-f590e01d6f65\" (UID: \"f6baf467-334f-4b16-8460-f590e01d6f65\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.165970 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c10632e3-be65-42c8-b358-b3ce41252b94-operator-scripts\") pod \"c10632e3-be65-42c8-b358-b3ce41252b94\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.166018 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f65975c6-56d0-456e-8c7c-ac900b682f94-operator-scripts\") pod \"f65975c6-56d0-456e-8c7c-ac900b682f94\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.166050 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vl5mp\" (UniqueName: \"kubernetes.io/projected/28562fbc-1113-4840-a9cf-597672e44f69-kube-api-access-vl5mp\") pod \"28562fbc-1113-4840-a9cf-597672e44f69\" (UID: \"28562fbc-1113-4840-a9cf-597672e44f69\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.166075 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-r9xwk\" (UniqueName: \"kubernetes.io/projected/f65975c6-56d0-456e-8c7c-ac900b682f94-kube-api-access-r9xwk\") pod \"f65975c6-56d0-456e-8c7c-ac900b682f94\" (UID: \"f65975c6-56d0-456e-8c7c-ac900b682f94\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.166101 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26tml\" (UniqueName: \"kubernetes.io/projected/c10632e3-be65-42c8-b358-b3ce41252b94-kube-api-access-26tml\") pod \"c10632e3-be65-42c8-b358-b3ce41252b94\" (UID: \"c10632e3-be65-42c8-b358-b3ce41252b94\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.166484 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.166500 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vltp\" (UniqueName: \"kubernetes.io/projected/703cd6e3-90a5-4c10-9f88-d4faac8e24e0-kube-api-access-7vltp\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.167192 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6baf467-334f-4b16-8460-f590e01d6f65-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f6baf467-334f-4b16-8460-f590e01d6f65" (UID: "f6baf467-334f-4b16-8460-f590e01d6f65"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.167225 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c10632e3-be65-42c8-b358-b3ce41252b94-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c10632e3-be65-42c8-b358-b3ce41252b94" (UID: "c10632e3-be65-42c8-b358-b3ce41252b94"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.167591 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a560774-b983-4fb0-b630-f9913688c130-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9a560774-b983-4fb0-b630-f9913688c130" (UID: "9a560774-b983-4fb0-b630-f9913688c130"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.167746 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28562fbc-1113-4840-a9cf-597672e44f69-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "28562fbc-1113-4840-a9cf-597672e44f69" (UID: "28562fbc-1113-4840-a9cf-597672e44f69"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.167863 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f65975c6-56d0-456e-8c7c-ac900b682f94-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f65975c6-56d0-456e-8c7c-ac900b682f94" (UID: "f65975c6-56d0-456e-8c7c-ac900b682f94"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.169644 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c10632e3-be65-42c8-b358-b3ce41252b94-kube-api-access-26tml" (OuterVolumeSpecName: "kube-api-access-26tml") pod "c10632e3-be65-42c8-b358-b3ce41252b94" (UID: "c10632e3-be65-42c8-b358-b3ce41252b94"). InnerVolumeSpecName "kube-api-access-26tml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.170577 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f65975c6-56d0-456e-8c7c-ac900b682f94-kube-api-access-r9xwk" (OuterVolumeSpecName: "kube-api-access-r9xwk") pod "f65975c6-56d0-456e-8c7c-ac900b682f94" (UID: "f65975c6-56d0-456e-8c7c-ac900b682f94"). InnerVolumeSpecName "kube-api-access-r9xwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.171155 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28562fbc-1113-4840-a9cf-597672e44f69-kube-api-access-vl5mp" (OuterVolumeSpecName: "kube-api-access-vl5mp") pod "28562fbc-1113-4840-a9cf-597672e44f69" (UID: "28562fbc-1113-4840-a9cf-597672e44f69"). InnerVolumeSpecName "kube-api-access-vl5mp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.171191 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a560774-b983-4fb0-b630-f9913688c130-kube-api-access-vs4rc" (OuterVolumeSpecName: "kube-api-access-vs4rc") pod "9a560774-b983-4fb0-b630-f9913688c130" (UID: "9a560774-b983-4fb0-b630-f9913688c130"). InnerVolumeSpecName "kube-api-access-vs4rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.171221 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6baf467-334f-4b16-8460-f590e01d6f65-kube-api-access-ccnkl" (OuterVolumeSpecName: "kube-api-access-ccnkl") pod "f6baf467-334f-4b16-8460-f590e01d6f65" (UID: "f6baf467-334f-4b16-8460-f590e01d6f65"). InnerVolumeSpecName "kube-api-access-ccnkl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267563 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs4rc\" (UniqueName: \"kubernetes.io/projected/9a560774-b983-4fb0-b630-f9913688c130-kube-api-access-vs4rc\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267597 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccnkl\" (UniqueName: \"kubernetes.io/projected/f6baf467-334f-4b16-8460-f590e01d6f65-kube-api-access-ccnkl\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267607 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28562fbc-1113-4840-a9cf-597672e44f69-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267615 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a560774-b983-4fb0-b630-f9913688c130-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267628 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6baf467-334f-4b16-8460-f590e01d6f65-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267636 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c10632e3-be65-42c8-b358-b3ce41252b94-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267645 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f65975c6-56d0-456e-8c7c-ac900b682f94-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267654 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vl5mp\" (UniqueName: \"kubernetes.io/projected/28562fbc-1113-4840-a9cf-597672e44f69-kube-api-access-vl5mp\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267661 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9xwk\" (UniqueName: \"kubernetes.io/projected/f65975c6-56d0-456e-8c7c-ac900b682f94-kube-api-access-r9xwk\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.267669 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26tml\" (UniqueName: \"kubernetes.io/projected/c10632e3-be65-42c8-b358-b3ce41252b94-kube-api-access-26tml\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.438972 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-61d1-account-create-znx74" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.438961 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-61d1-account-create-znx74" event={"ID":"703cd6e3-90a5-4c10-9f88-d4faac8e24e0","Type":"ContainerDied","Data":"65d6943a1f67153c0403a99761fcfd6d28995f63b774647f571f2be8b8d4babc"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.439119 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65d6943a1f67153c0403a99761fcfd6d28995f63b774647f571f2be8b8d4babc" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.440573 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ndx2b" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.440518 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ndx2b" event={"ID":"f6baf467-334f-4b16-8460-f590e01d6f65","Type":"ContainerDied","Data":"bd819db3dda14f488afbab1708950613f7b16849d1ff9d99906d23a257569dc7"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.440790 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd819db3dda14f488afbab1708950613f7b16849d1ff9d99906d23a257569dc7" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.442314 4812 generic.go:334] "Generic (PLEG): container finished" podID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerID="32e0e4507b955f305c62c0c339a7ef214d4cddd6325e9baf88c34d4974bab7f7" exitCode=0 Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.442381 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" event={"ID":"e0f9d008-45d4-470b-b3f5-b4713a8730dd","Type":"ContainerDied","Data":"32e0e4507b955f305c62c0c339a7ef214d4cddd6325e9baf88c34d4974bab7f7"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.444252 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xkff4" event={"ID":"28562fbc-1113-4840-a9cf-597672e44f69","Type":"ContainerDied","Data":"baaced276bdf665cb6d24fc56d76e05f9df9a938894633eb89ea24774f7606cd"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.444276 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xkff4" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.444286 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="baaced276bdf665cb6d24fc56d76e05f9df9a938894633eb89ea24774f7606cd" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.447080 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-f4f4-account-create-zlgh5" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.447083 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f4f4-account-create-zlgh5" event={"ID":"9a560774-b983-4fb0-b630-f9913688c130","Type":"ContainerDied","Data":"3ea7336d7943aa97fd35801c02e557eb0c9c81af01dea64d38ff10f144bb56b1"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.447198 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ea7336d7943aa97fd35801c02e557eb0c9c81af01dea64d38ff10f144bb56b1" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.448658 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5a3f-account-create-mzlm9" event={"ID":"f65975c6-56d0-456e-8c7c-ac900b682f94","Type":"ContainerDied","Data":"135bd19bba62f0762d3af9f8183a35517ad6a3651c605c3fda21c17ca0b4000c"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.448686 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="135bd19bba62f0762d3af9f8183a35517ad6a3651c605c3fda21c17ca0b4000c" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.448728 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5a3f-account-create-mzlm9" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.454798 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qhkxm" event={"ID":"c10632e3-be65-42c8-b358-b3ce41252b94","Type":"ContainerDied","Data":"9705e1874c8b2f8ba44de5fd082e79011a41734fa086941c0c2164b3b21d0aa2"} Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.454831 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9705e1874c8b2f8ba44de5fd082e79011a41734fa086941c0c2164b3b21d0aa2" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.454884 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qhkxm" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.605595 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.775015 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6hf4\" (UniqueName: \"kubernetes.io/projected/e0f9d008-45d4-470b-b3f5-b4713a8730dd-kube-api-access-n6hf4\") pod \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.775672 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-sb\") pod \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.775877 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-dns-svc\") pod \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.776069 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-config\") pod \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.776286 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-nb\") pod \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\" (UID: \"e0f9d008-45d4-470b-b3f5-b4713a8730dd\") " Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.778893 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0f9d008-45d4-470b-b3f5-b4713a8730dd-kube-api-access-n6hf4" (OuterVolumeSpecName: "kube-api-access-n6hf4") pod "e0f9d008-45d4-470b-b3f5-b4713a8730dd" (UID: "e0f9d008-45d4-470b-b3f5-b4713a8730dd"). InnerVolumeSpecName "kube-api-access-n6hf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.817764 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e0f9d008-45d4-470b-b3f5-b4713a8730dd" (UID: "e0f9d008-45d4-470b-b3f5-b4713a8730dd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.818509 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-config" (OuterVolumeSpecName: "config") pod "e0f9d008-45d4-470b-b3f5-b4713a8730dd" (UID: "e0f9d008-45d4-470b-b3f5-b4713a8730dd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.821680 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e0f9d008-45d4-470b-b3f5-b4713a8730dd" (UID: "e0f9d008-45d4-470b-b3f5-b4713a8730dd"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.823949 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e0f9d008-45d4-470b-b3f5-b4713a8730dd" (UID: "e0f9d008-45d4-470b-b3f5-b4713a8730dd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.879184 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.879217 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6hf4\" (UniqueName: \"kubernetes.io/projected/e0f9d008-45d4-470b-b3f5-b4713a8730dd-kube-api-access-n6hf4\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.879226 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.879235 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:02 crc kubenswrapper[4812]: I1125 17:04:02.879243 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0f9d008-45d4-470b-b3f5-b4713a8730dd-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.464563 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpt4v" event={"ID":"cc9bcdae-4537-427d-a3f1-064ae62d7b62","Type":"ContainerStarted","Data":"ad7559301665deafaf1f25165010182b34f3f7e759e0f0a19006c94e8d981908"} Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.466943 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" event={"ID":"e0f9d008-45d4-470b-b3f5-b4713a8730dd","Type":"ContainerDied","Data":"79534fcbf259cbd42ad639bde879b4b0b258580ca92cd435a0052e59bbbb086a"} Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.467117 4812 scope.go:117] "RemoveContainer" containerID="32e0e4507b955f305c62c0c339a7ef214d4cddd6325e9baf88c34d4974bab7f7" Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.466998 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-6tr4l" Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.485713 4812 scope.go:117] "RemoveContainer" containerID="47ec22ae7c7f93448d58f28d9b083c0f3c8c3a2755e908f310704609d0f97238" Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.499720 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-qpt4v" podStartSLOduration=2.216189869 podStartE2EDuration="9.499700387s" podCreationTimestamp="2025-11-25 17:03:54 +0000 UTC" firstStartedPulling="2025-11-25 17:03:55.307687161 +0000 UTC m=+1010.147829256" lastFinishedPulling="2025-11-25 17:04:02.591197679 +0000 UTC m=+1017.431339774" observedRunningTime="2025-11-25 17:04:03.499269586 +0000 UTC m=+1018.339411691" watchObservedRunningTime="2025-11-25 17:04:03.499700387 +0000 UTC m=+1018.339842482" Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.518830 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-6tr4l"] Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.527387 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-6tr4l"] Nov 25 17:04:03 crc kubenswrapper[4812]: I1125 17:04:03.863937 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" path="/var/lib/kubelet/pods/e0f9d008-45d4-470b-b3f5-b4713a8730dd/volumes" Nov 25 17:04:09 crc kubenswrapper[4812]: I1125 17:04:09.512148 4812 generic.go:334] "Generic (PLEG): container finished" podID="cc9bcdae-4537-427d-a3f1-064ae62d7b62" containerID="ad7559301665deafaf1f25165010182b34f3f7e759e0f0a19006c94e8d981908" exitCode=0 Nov 25 17:04:09 crc kubenswrapper[4812]: I1125 17:04:09.512243 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpt4v" event={"ID":"cc9bcdae-4537-427d-a3f1-064ae62d7b62","Type":"ContainerDied","Data":"ad7559301665deafaf1f25165010182b34f3f7e759e0f0a19006c94e8d981908"} Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.802293 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.899039 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzn6g\" (UniqueName: \"kubernetes.io/projected/cc9bcdae-4537-427d-a3f1-064ae62d7b62-kube-api-access-pzn6g\") pod \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.899198 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-config-data\") pod \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.899245 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-combined-ca-bundle\") pod \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\" (UID: \"cc9bcdae-4537-427d-a3f1-064ae62d7b62\") " Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.904159 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc9bcdae-4537-427d-a3f1-064ae62d7b62-kube-api-access-pzn6g" (OuterVolumeSpecName: "kube-api-access-pzn6g") pod "cc9bcdae-4537-427d-a3f1-064ae62d7b62" (UID: "cc9bcdae-4537-427d-a3f1-064ae62d7b62"). InnerVolumeSpecName "kube-api-access-pzn6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.922904 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc9bcdae-4537-427d-a3f1-064ae62d7b62" (UID: "cc9bcdae-4537-427d-a3f1-064ae62d7b62"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:10 crc kubenswrapper[4812]: I1125 17:04:10.957043 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-config-data" (OuterVolumeSpecName: "config-data") pod "cc9bcdae-4537-427d-a3f1-064ae62d7b62" (UID: "cc9bcdae-4537-427d-a3f1-064ae62d7b62"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.001776 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzn6g\" (UniqueName: \"kubernetes.io/projected/cc9bcdae-4537-427d-a3f1-064ae62d7b62-kube-api-access-pzn6g\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.001818 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.001834 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9bcdae-4537-427d-a3f1-064ae62d7b62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.528157 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qpt4v" event={"ID":"cc9bcdae-4537-427d-a3f1-064ae62d7b62","Type":"ContainerDied","Data":"ddc4650eb0369a66d4dda5260a138205d35a8ae5912e41443099b7bf7aee3791"} Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.528199 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddc4650eb0369a66d4dda5260a138205d35a8ae5912e41443099b7bf7aee3791" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.528264 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qpt4v" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.792835 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-wpwcb"] Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793486 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28562fbc-1113-4840-a9cf-597672e44f69" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793507 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="28562fbc-1113-4840-a9cf-597672e44f69" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793521 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10632e3-be65-42c8-b358-b3ce41252b94" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793533 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10632e3-be65-42c8-b358-b3ce41252b94" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793562 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a560774-b983-4fb0-b630-f9913688c130" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793571 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a560774-b983-4fb0-b630-f9913688c130" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793587 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f65975c6-56d0-456e-8c7c-ac900b682f94" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793594 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f65975c6-56d0-456e-8c7c-ac900b682f94" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793608 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="703cd6e3-90a5-4c10-9f88-d4faac8e24e0" 
containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793616 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="703cd6e3-90a5-4c10-9f88-d4faac8e24e0" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793626 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerName="dnsmasq-dns" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793634 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerName="dnsmasq-dns" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793645 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc9bcdae-4537-427d-a3f1-064ae62d7b62" containerName="keystone-db-sync" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793652 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc9bcdae-4537-427d-a3f1-064ae62d7b62" containerName="keystone-db-sync" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793672 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6baf467-334f-4b16-8460-f590e01d6f65" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793680 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6baf467-334f-4b16-8460-f590e01d6f65" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: E1125 17:04:11.793714 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerName="init" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793722 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerName="init" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793881 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10632e3-be65-42c8-b358-b3ce41252b94" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793898 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="703cd6e3-90a5-4c10-9f88-d4faac8e24e0" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793909 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc9bcdae-4537-427d-a3f1-064ae62d7b62" containerName="keystone-db-sync" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793924 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f65975c6-56d0-456e-8c7c-ac900b682f94" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793938 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a560774-b983-4fb0-b630-f9913688c130" containerName="mariadb-account-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793951 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0f9d008-45d4-470b-b3f5-b4713a8730dd" containerName="dnsmasq-dns" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793962 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="28562fbc-1113-4840-a9cf-597672e44f69" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.793970 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6baf467-334f-4b16-8460-f590e01d6f65" containerName="mariadb-database-create" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.794535 4812 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.797201 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.798737 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.799163 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.799393 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.804802 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-s426j" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.820489 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-kt56k"] Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.822391 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.920753 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-credential-keys\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.920998 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921092 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-fernet-keys\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921192 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-dns-svc\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921315 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-scripts\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921387 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrsxf\" (UniqueName: \"kubernetes.io/projected/ca6910b9-555a-465c-9c8d-42e5a806f084-kube-api-access-lrsxf\") pod 
\"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921464 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-config-data\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921541 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921656 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s2gr\" (UniqueName: \"kubernetes.io/projected/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-kube-api-access-9s2gr\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921721 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-combined-ca-bundle\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:11 crc kubenswrapper[4812]: I1125 17:04:11.921791 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-config\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025296 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-scripts\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025352 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrsxf\" (UniqueName: \"kubernetes.io/projected/ca6910b9-555a-465c-9c8d-42e5a806f084-kube-api-access-lrsxf\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025381 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-config-data\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025403 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-sb\") pod 
\"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025448 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s2gr\" (UniqueName: \"kubernetes.io/projected/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-kube-api-access-9s2gr\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025466 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-combined-ca-bundle\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025490 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-config\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025568 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-credential-keys\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025596 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025621 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-fernet-keys\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.025676 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-dns-svc\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.027373 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.027489 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " 
pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.027937 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-config\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.036775 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-credential-keys\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.037145 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-dns-svc\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.038655 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-scripts\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.038918 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-fernet-keys\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.039001 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-combined-ca-bundle\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.039925 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-config-data\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.067385 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-wpwcb"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.067419 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-kt56k"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.067437 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-xz5j9"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.068554 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-xz5j9"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.068576 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-j4kgx"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.069261 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.070088 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.079439 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.080114 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.080283 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-h9qch" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.080788 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-rggnk" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.080918 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.081024 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.082168 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-j4kgx"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.084160 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s2gr\" (UniqueName: \"kubernetes.io/projected/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-kube-api-access-9s2gr\") pod \"dnsmasq-dns-6546db6db7-kt56k\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.091472 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.096361 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrsxf\" (UniqueName: \"kubernetes.io/projected/ca6910b9-555a-465c-9c8d-42e5a806f084-kube-api-access-lrsxf\") pod \"keystone-bootstrap-wpwcb\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.115421 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.115922 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.118407 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.122976 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.123198 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.123342 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-t5rs4"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.124519 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127146 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-scripts\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127186 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75mjh\" (UniqueName: \"kubernetes.io/projected/bb88081d-3888-485c-8105-d3ab11630457-kube-api-access-75mjh\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127241 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-config-data\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127264 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-combined-ca-bundle\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127282 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-etc-machine-id\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127309 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-combined-ca-bundle\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127349 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-scripts\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127376 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-config-data\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127392 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jck2v\" (UniqueName: \"kubernetes.io/projected/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-kube-api-access-jck2v\") pod \"cinder-db-sync-xz5j9\" (UID: 
\"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127420 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb88081d-3888-485c-8105-d3ab11630457-logs\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127459 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-db-sync-config-data\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.127818 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-n2tk2" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.133016 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.133247 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-kt56k"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.152509 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.166448 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-t5rs4"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.234939 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-f9kvr"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236317 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6868h\" (UniqueName: \"kubernetes.io/projected/3f63bc45-88e5-4074-b40d-a2741fa63339-kube-api-access-6868h\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236379 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-config-data\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236412 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-db-sync-config-data\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236449 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-run-httpd\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236466 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-log-httpd\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236494 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-scripts\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236519 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75mjh\" (UniqueName: \"kubernetes.io/projected/bb88081d-3888-485c-8105-d3ab11630457-kube-api-access-75mjh\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236590 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236626 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-scripts\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236651 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-combined-ca-bundle\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236681 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-config-data\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236719 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-db-sync-config-data\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236744 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-combined-ca-bundle\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236777 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-etc-machine-id\") pod \"cinder-db-sync-xz5j9\" (UID: 
\"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236810 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-combined-ca-bundle\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236861 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236886 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-scripts\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236937 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8dkn\" (UniqueName: \"kubernetes.io/projected/2e50f7bc-1b06-447d-b556-6a7adc34b072-kube-api-access-x8dkn\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236963 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-config-data\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.236987 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jck2v\" (UniqueName: \"kubernetes.io/projected/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-kube-api-access-jck2v\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.237018 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb88081d-3888-485c-8105-d3ab11630457-logs\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.237803 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb88081d-3888-485c-8105-d3ab11630457-logs\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.238082 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-etc-machine-id\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.238221 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.241027 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.241182 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-n4fjw" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.245804 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.253753 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-config-data\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.254177 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-scripts\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.254484 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-combined-ca-bundle\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.259182 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-vhnzb"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.260935 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.260983 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-db-sync-config-data\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.269185 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-config-data\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.269402 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-scripts\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.269427 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-combined-ca-bundle\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.282858 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75mjh\" (UniqueName: \"kubernetes.io/projected/bb88081d-3888-485c-8105-d3ab11630457-kube-api-access-75mjh\") pod \"placement-db-sync-j4kgx\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.284469 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.297477 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jck2v\" (UniqueName: \"kubernetes.io/projected/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-kube-api-access-jck2v\") pod \"cinder-db-sync-xz5j9\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.298785 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-f9kvr"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.305463 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-vhnzb"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.321085 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.338252 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.338310 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-scripts\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.338337 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-combined-ca-bundle\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.338392 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-db-sync-config-data\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.338449 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.338474 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340275 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340315 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-combined-ca-bundle\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340364 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8dkn\" (UniqueName: \"kubernetes.io/projected/2e50f7bc-1b06-447d-b556-6a7adc34b072-kube-api-access-x8dkn\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340388 4812 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-config\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340435 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340456 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxxp5\" (UniqueName: \"kubernetes.io/projected/6982125a-a01a-4eed-a7ce-e335ef73e14d-kube-api-access-gxxp5\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340472 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp27d\" (UniqueName: \"kubernetes.io/projected/046e94e3-a63c-490b-8fb1-db6592742208-kube-api-access-tp27d\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6868h\" (UniqueName: \"kubernetes.io/projected/3f63bc45-88e5-4074-b40d-a2741fa63339-kube-api-access-6868h\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340520 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-config-data\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340555 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-config\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340582 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-run-httpd\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.340597 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-log-httpd\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.341220 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-log-httpd\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.341796 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-run-httpd\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.350748 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-db-sync-config-data\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.351224 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-combined-ca-bundle\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.353666 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-scripts\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.353780 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.356062 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.356269 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-config-data\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.364032 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8dkn\" (UniqueName: \"kubernetes.io/projected/2e50f7bc-1b06-447d-b556-6a7adc34b072-kube-api-access-x8dkn\") pod \"ceilometer-0\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.368412 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6868h\" (UniqueName: \"kubernetes.io/projected/3f63bc45-88e5-4074-b40d-a2741fa63339-kube-api-access-6868h\") pod \"barbican-db-sync-t5rs4\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.442939 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.442981 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxxp5\" (UniqueName: \"kubernetes.io/projected/6982125a-a01a-4eed-a7ce-e335ef73e14d-kube-api-access-gxxp5\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.443017 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp27d\" (UniqueName: \"kubernetes.io/projected/046e94e3-a63c-490b-8fb1-db6592742208-kube-api-access-tp27d\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.443071 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-config\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.443247 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.443265 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.443289 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-combined-ca-bundle\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.443369 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-config\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.445099 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.445506 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" 
(UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.448199 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.449085 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-config\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.451698 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-config\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.451978 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-combined-ca-bundle\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.463302 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp27d\" (UniqueName: \"kubernetes.io/projected/046e94e3-a63c-490b-8fb1-db6592742208-kube-api-access-tp27d\") pod \"neutron-db-sync-f9kvr\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.463540 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxxp5\" (UniqueName: \"kubernetes.io/projected/6982125a-a01a-4eed-a7ce-e335ef73e14d-kube-api-access-gxxp5\") pod \"dnsmasq-dns-7987f74bbc-vhnzb\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.639574 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.657144 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.672310 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.682317 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.742805 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-wpwcb"] Nov 25 17:04:12 crc kubenswrapper[4812]: W1125 17:04:12.760180 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca6910b9_555a_465c_9c8d_42e5a806f084.slice/crio-c93dcaef5441d7673f9325b450751b5f8c70f810c517b595e5ad3727c421b2c0 WatchSource:0}: Error finding container c93dcaef5441d7673f9325b450751b5f8c70f810c517b595e5ad3727c421b2c0: Status 404 returned error can't find the container with id c93dcaef5441d7673f9325b450751b5f8c70f810c517b595e5ad3727c421b2c0 Nov 25 17:04:12 crc kubenswrapper[4812]: W1125 17:04:12.778890 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6ecb97d_9b95_4f80_888a_6a7574f89ab6.slice/crio-78447dbbe89540f9234cfb14febed94b7ddd837ce790bc225c9fa20781791fef WatchSource:0}: Error finding container 78447dbbe89540f9234cfb14febed94b7ddd837ce790bc225c9fa20781791fef: Status 404 returned error can't find the container with id 78447dbbe89540f9234cfb14febed94b7ddd837ce790bc225c9fa20781791fef Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.794406 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-kt56k"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.895211 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-j4kgx"] Nov 25 17:04:12 crc kubenswrapper[4812]: I1125 17:04:12.903070 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-xz5j9"] Nov 25 17:04:12 crc kubenswrapper[4812]: W1125 17:04:12.903960 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb88081d_3888_485c_8105_d3ab11630457.slice/crio-a2a20fb29859a109daf3e690e6d751205bf648dec036a6545bf78f25c6f8aedc WatchSource:0}: Error finding container a2a20fb29859a109daf3e690e6d751205bf648dec036a6545bf78f25c6f8aedc: Status 404 returned error can't find the container with id a2a20fb29859a109daf3e690e6d751205bf648dec036a6545bf78f25c6f8aedc Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.195314 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-t5rs4"] Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.265020 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-f9kvr"] Nov 25 17:04:13 crc kubenswrapper[4812]: W1125 17:04:13.274227 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod046e94e3_a63c_490b_8fb1_db6592742208.slice/crio-c42f7d1e55220be370b3201d335f1e65f0c46b61221d5a6b2781837b7b7762b5 WatchSource:0}: Error finding container c42f7d1e55220be370b3201d335f1e65f0c46b61221d5a6b2781837b7b7762b5: Status 404 returned error can't find the container with id c42f7d1e55220be370b3201d335f1e65f0c46b61221d5a6b2781837b7b7762b5 Nov 25 17:04:13 crc kubenswrapper[4812]: W1125 17:04:13.285621 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e50f7bc_1b06_447d_b556_6a7adc34b072.slice/crio-a326aa7548140618ef18fd4e891d946427252442b97a9884c7425a18bce73ae2 WatchSource:0}: Error finding container 
a326aa7548140618ef18fd4e891d946427252442b97a9884c7425a18bce73ae2: Status 404 returned error can't find the container with id a326aa7548140618ef18fd4e891d946427252442b97a9884c7425a18bce73ae2 Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.288341 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.389165 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-vhnzb"] Nov 25 17:04:13 crc kubenswrapper[4812]: W1125 17:04:13.405645 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6982125a_a01a_4eed_a7ce_e335ef73e14d.slice/crio-81d027a07c3770f4c82c653687e7f164bec57fb90334d4f9960a1afce92a265a WatchSource:0}: Error finding container 81d027a07c3770f4c82c653687e7f164bec57fb90334d4f9960a1afce92a265a: Status 404 returned error can't find the container with id 81d027a07c3770f4c82c653687e7f164bec57fb90334d4f9960a1afce92a265a Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.543840 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f9kvr" event={"ID":"046e94e3-a63c-490b-8fb1-db6592742208","Type":"ContainerStarted","Data":"c42f7d1e55220be370b3201d335f1e65f0c46b61221d5a6b2781837b7b7762b5"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.545436 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xz5j9" event={"ID":"ee5fe32b-eefd-4847-a053-b72c9f06e3b1","Type":"ContainerStarted","Data":"42d9bde1b2e2efbb269a7fab47574a64374b896ab8a00f6da56556c9f1248643"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.546971 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wpwcb" event={"ID":"ca6910b9-555a-465c-9c8d-42e5a806f084","Type":"ContainerStarted","Data":"8e321d4100a069bc630e5af23f86169ad9ffa38828583660ef122bd32d47e596"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.547002 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wpwcb" event={"ID":"ca6910b9-555a-465c-9c8d-42e5a806f084","Type":"ContainerStarted","Data":"c93dcaef5441d7673f9325b450751b5f8c70f810c517b595e5ad3727c421b2c0"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.548613 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerStarted","Data":"a326aa7548140618ef18fd4e891d946427252442b97a9884c7425a18bce73ae2"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.550163 4812 generic.go:334] "Generic (PLEG): container finished" podID="e6ecb97d-9b95-4f80-888a-6a7574f89ab6" containerID="c72c42f17dae3d588642736c6ba7febe242b20ca232b302190753610002c3149" exitCode=0 Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.550192 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" event={"ID":"e6ecb97d-9b95-4f80-888a-6a7574f89ab6","Type":"ContainerDied","Data":"c72c42f17dae3d588642736c6ba7febe242b20ca232b302190753610002c3149"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.550216 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" event={"ID":"e6ecb97d-9b95-4f80-888a-6a7574f89ab6","Type":"ContainerStarted","Data":"78447dbbe89540f9234cfb14febed94b7ddd837ce790bc225c9fa20781791fef"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.551322 4812 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j4kgx" event={"ID":"bb88081d-3888-485c-8105-d3ab11630457","Type":"ContainerStarted","Data":"a2a20fb29859a109daf3e690e6d751205bf648dec036a6545bf78f25c6f8aedc"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.552278 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t5rs4" event={"ID":"3f63bc45-88e5-4074-b40d-a2741fa63339","Type":"ContainerStarted","Data":"6041abd92dfec2d539726b5ad0607af5ce5bcb4acbb39c39cf35f0882a417745"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.553442 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" event={"ID":"6982125a-a01a-4eed-a7ce-e335ef73e14d","Type":"ContainerStarted","Data":"81d027a07c3770f4c82c653687e7f164bec57fb90334d4f9960a1afce92a265a"} Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.568221 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-wpwcb" podStartSLOduration=2.5682052410000002 podStartE2EDuration="2.568205241s" podCreationTimestamp="2025-11-25 17:04:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:04:13.563592917 +0000 UTC m=+1028.403735012" watchObservedRunningTime="2025-11-25 17:04:13.568205241 +0000 UTC m=+1028.408347326" Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.873829 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.976995 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-dns-svc\") pod \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.977390 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9s2gr\" (UniqueName: \"kubernetes.io/projected/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-kube-api-access-9s2gr\") pod \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.977451 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-sb\") pod \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.977580 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-nb\") pod \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.977697 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-config\") pod \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\" (UID: \"e6ecb97d-9b95-4f80-888a-6a7574f89ab6\") " Nov 25 17:04:13 crc kubenswrapper[4812]: I1125 17:04:13.987488 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-kube-api-access-9s2gr" (OuterVolumeSpecName: "kube-api-access-9s2gr") pod "e6ecb97d-9b95-4f80-888a-6a7574f89ab6" (UID: "e6ecb97d-9b95-4f80-888a-6a7574f89ab6"). InnerVolumeSpecName "kube-api-access-9s2gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.001969 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e6ecb97d-9b95-4f80-888a-6a7574f89ab6" (UID: "e6ecb97d-9b95-4f80-888a-6a7574f89ab6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.005086 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e6ecb97d-9b95-4f80-888a-6a7574f89ab6" (UID: "e6ecb97d-9b95-4f80-888a-6a7574f89ab6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.006193 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e6ecb97d-9b95-4f80-888a-6a7574f89ab6" (UID: "e6ecb97d-9b95-4f80-888a-6a7574f89ab6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.026517 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-config" (OuterVolumeSpecName: "config") pod "e6ecb97d-9b95-4f80-888a-6a7574f89ab6" (UID: "e6ecb97d-9b95-4f80-888a-6a7574f89ab6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.080320 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.080362 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.080376 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9s2gr\" (UniqueName: \"kubernetes.io/projected/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-kube-api-access-9s2gr\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.080393 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.080404 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6ecb97d-9b95-4f80-888a-6a7574f89ab6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.234565 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.566741 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" event={"ID":"e6ecb97d-9b95-4f80-888a-6a7574f89ab6","Type":"ContainerDied","Data":"78447dbbe89540f9234cfb14febed94b7ddd837ce790bc225c9fa20781791fef"} Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.566808 4812 scope.go:117] "RemoveContainer" containerID="c72c42f17dae3d588642736c6ba7febe242b20ca232b302190753610002c3149" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.566922 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-kt56k" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.572623 4812 generic.go:334] "Generic (PLEG): container finished" podID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerID="03477f5e749b2090b26de554b708d6bdb4d61ebf9a12cfcda20fec08dafb2722" exitCode=0 Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.572867 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" event={"ID":"6982125a-a01a-4eed-a7ce-e335ef73e14d","Type":"ContainerDied","Data":"03477f5e749b2090b26de554b708d6bdb4d61ebf9a12cfcda20fec08dafb2722"} Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.581959 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f9kvr" event={"ID":"046e94e3-a63c-490b-8fb1-db6592742208","Type":"ContainerStarted","Data":"749d71bc9d70c64e1e650dd6acc3ee5198ff40adf1d41c141775b91a57048ead"} Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.620021 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-f9kvr" podStartSLOduration=2.620005297 podStartE2EDuration="2.620005297s" podCreationTimestamp="2025-11-25 17:04:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:04:14.609045342 +0000 UTC m=+1029.449187457" watchObservedRunningTime="2025-11-25 17:04:14.620005297 +0000 UTC m=+1029.460147382" Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.854929 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-kt56k"] Nov 25 17:04:14 crc kubenswrapper[4812]: I1125 17:04:14.872715 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-kt56k"] Nov 25 17:04:15 crc kubenswrapper[4812]: I1125 17:04:15.593302 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" event={"ID":"6982125a-a01a-4eed-a7ce-e335ef73e14d","Type":"ContainerStarted","Data":"72d8a6e1e56a23074b759106bb7cbf2b3c5aaca0a8ba664db713913fee6d05db"} Nov 25 17:04:15 crc kubenswrapper[4812]: I1125 17:04:15.593841 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:15 crc kubenswrapper[4812]: I1125 17:04:15.619659 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" podStartSLOduration=3.619633578 podStartE2EDuration="3.619633578s" podCreationTimestamp="2025-11-25 17:04:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:04:15.611952151 +0000 UTC m=+1030.452094256" watchObservedRunningTime="2025-11-25 17:04:15.619633578 +0000 UTC m=+1030.459775683" Nov 25 17:04:15 crc kubenswrapper[4812]: I1125 17:04:15.846466 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6ecb97d-9b95-4f80-888a-6a7574f89ab6" path="/var/lib/kubelet/pods/e6ecb97d-9b95-4f80-888a-6a7574f89ab6/volumes" Nov 25 17:04:17 crc kubenswrapper[4812]: I1125 17:04:17.612499 4812 generic.go:334] "Generic (PLEG): container finished" podID="ca6910b9-555a-465c-9c8d-42e5a806f084" containerID="8e321d4100a069bc630e5af23f86169ad9ffa38828583660ef122bd32d47e596" exitCode=0 Nov 25 17:04:17 crc kubenswrapper[4812]: I1125 17:04:17.612576 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-wpwcb" event={"ID":"ca6910b9-555a-465c-9c8d-42e5a806f084","Type":"ContainerDied","Data":"8e321d4100a069bc630e5af23f86169ad9ffa38828583660ef122bd32d47e596"} Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.726161 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.801876 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-combined-ca-bundle\") pod \"ca6910b9-555a-465c-9c8d-42e5a806f084\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.802239 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-scripts\") pod \"ca6910b9-555a-465c-9c8d-42e5a806f084\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.802284 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-config-data\") pod \"ca6910b9-555a-465c-9c8d-42e5a806f084\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.802328 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-fernet-keys\") pod \"ca6910b9-555a-465c-9c8d-42e5a806f084\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.802366 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrsxf\" (UniqueName: \"kubernetes.io/projected/ca6910b9-555a-465c-9c8d-42e5a806f084-kube-api-access-lrsxf\") pod \"ca6910b9-555a-465c-9c8d-42e5a806f084\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.802466 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-credential-keys\") pod \"ca6910b9-555a-465c-9c8d-42e5a806f084\" (UID: \"ca6910b9-555a-465c-9c8d-42e5a806f084\") " Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.807860 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ca6910b9-555a-465c-9c8d-42e5a806f084" (UID: "ca6910b9-555a-465c-9c8d-42e5a806f084"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.807895 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-scripts" (OuterVolumeSpecName: "scripts") pod "ca6910b9-555a-465c-9c8d-42e5a806f084" (UID: "ca6910b9-555a-465c-9c8d-42e5a806f084"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.808862 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ca6910b9-555a-465c-9c8d-42e5a806f084" (UID: "ca6910b9-555a-465c-9c8d-42e5a806f084"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.818874 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca6910b9-555a-465c-9c8d-42e5a806f084-kube-api-access-lrsxf" (OuterVolumeSpecName: "kube-api-access-lrsxf") pod "ca6910b9-555a-465c-9c8d-42e5a806f084" (UID: "ca6910b9-555a-465c-9c8d-42e5a806f084"). InnerVolumeSpecName "kube-api-access-lrsxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.830057 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-config-data" (OuterVolumeSpecName: "config-data") pod "ca6910b9-555a-465c-9c8d-42e5a806f084" (UID: "ca6910b9-555a-465c-9c8d-42e5a806f084"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.835046 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ca6910b9-555a-465c-9c8d-42e5a806f084" (UID: "ca6910b9-555a-465c-9c8d-42e5a806f084"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.905007 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.905064 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.905073 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.905084 4812 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.905117 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrsxf\" (UniqueName: \"kubernetes.io/projected/ca6910b9-555a-465c-9c8d-42e5a806f084-kube-api-access-lrsxf\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:19 crc kubenswrapper[4812]: I1125 17:04:19.905133 4812 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ca6910b9-555a-465c-9c8d-42e5a806f084-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.636847 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-wpwcb" event={"ID":"ca6910b9-555a-465c-9c8d-42e5a806f084","Type":"ContainerDied","Data":"c93dcaef5441d7673f9325b450751b5f8c70f810c517b595e5ad3727c421b2c0"} Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.636885 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c93dcaef5441d7673f9325b450751b5f8c70f810c517b595e5ad3727c421b2c0" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.636889 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wpwcb" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.800871 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-wpwcb"] Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.806376 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-wpwcb"] Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.905856 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8k7d7"] Nov 25 17:04:20 crc kubenswrapper[4812]: E1125 17:04:20.906206 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6ecb97d-9b95-4f80-888a-6a7574f89ab6" containerName="init" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.906219 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6ecb97d-9b95-4f80-888a-6a7574f89ab6" containerName="init" Nov 25 17:04:20 crc kubenswrapper[4812]: E1125 17:04:20.906234 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6910b9-555a-465c-9c8d-42e5a806f084" containerName="keystone-bootstrap" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.906240 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6910b9-555a-465c-9c8d-42e5a806f084" containerName="keystone-bootstrap" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.906386 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca6910b9-555a-465c-9c8d-42e5a806f084" containerName="keystone-bootstrap" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.906411 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6ecb97d-9b95-4f80-888a-6a7574f89ab6" containerName="init" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.907078 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.909453 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.909691 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.910413 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-s426j" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.912027 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.912403 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 17:04:20 crc kubenswrapper[4812]: I1125 17:04:20.916728 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8k7d7"] Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.024656 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp2fr\" (UniqueName: \"kubernetes.io/projected/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-kube-api-access-cp2fr\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.024747 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-scripts\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.024785 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-combined-ca-bundle\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.024843 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-config-data\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.024869 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-credential-keys\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.025011 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-fernet-keys\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.126052 4812 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-config-data\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.126106 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-credential-keys\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.126176 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-fernet-keys\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.126220 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp2fr\" (UniqueName: \"kubernetes.io/projected/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-kube-api-access-cp2fr\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.126258 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-scripts\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.126279 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-combined-ca-bundle\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.131054 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-scripts\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.131569 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-config-data\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.131617 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-fernet-keys\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.132422 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-credential-keys\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " 
pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.135625 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-combined-ca-bundle\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.157460 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp2fr\" (UniqueName: \"kubernetes.io/projected/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-kube-api-access-cp2fr\") pod \"keystone-bootstrap-8k7d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.225934 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:21 crc kubenswrapper[4812]: I1125 17:04:21.844312 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca6910b9-555a-465c-9c8d-42e5a806f084" path="/var/lib/kubelet/pods/ca6910b9-555a-465c-9c8d-42e5a806f084/volumes" Nov 25 17:04:22 crc kubenswrapper[4812]: I1125 17:04:22.685267 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:04:22 crc kubenswrapper[4812]: I1125 17:04:22.748505 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-w8rxh"] Nov 25 17:04:22 crc kubenswrapper[4812]: I1125 17:04:22.748900 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" containerID="cri-o://b2592d6694eb11c675ad251d0732050581d4d07cbe5de16460b6cc2f94cf7c46" gracePeriod=10 Nov 25 17:04:25 crc kubenswrapper[4812]: E1125 17:04:25.634443 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 25 17:04:25 crc kubenswrapper[4812]: E1125 17:04:25.635320 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6868h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-t5rs4_openstack(3f63bc45-88e5-4074-b40d-a2741fa63339): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 17:04:25 crc kubenswrapper[4812]: E1125 17:04:25.636517 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-t5rs4" podUID="3f63bc45-88e5-4074-b40d-a2741fa63339" Nov 25 17:04:25 crc kubenswrapper[4812]: I1125 17:04:25.677160 4812 generic.go:334] "Generic (PLEG): container finished" podID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerID="b2592d6694eb11c675ad251d0732050581d4d07cbe5de16460b6cc2f94cf7c46" exitCode=0 Nov 25 17:04:25 crc kubenswrapper[4812]: I1125 17:04:25.677240 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" event={"ID":"07da6a02-0fa5-4f26-be21-ab68f365c412","Type":"ContainerDied","Data":"b2592d6694eb11c675ad251d0732050581d4d07cbe5de16460b6cc2f94cf7c46"} Nov 25 17:04:25 crc kubenswrapper[4812]: E1125 17:04:25.678576 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-t5rs4" podUID="3f63bc45-88e5-4074-b40d-a2741fa63339" Nov 25 17:04:25 crc kubenswrapper[4812]: I1125 17:04:25.934959 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 17:04:27 crc kubenswrapper[4812]: I1125 17:04:27.333037 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:04:27 crc kubenswrapper[4812]: I1125 17:04:27.333328 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:04:27 crc kubenswrapper[4812]: I1125 17:04:27.333383 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:04:27 crc kubenswrapper[4812]: I1125 17:04:27.334215 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1342f495637eb94354f1b480bb23cc055dabea0e94c3ee8c3777be3bb44ef47e"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:04:27 crc kubenswrapper[4812]: I1125 17:04:27.334277 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://1342f495637eb94354f1b480bb23cc055dabea0e94c3ee8c3777be3bb44ef47e" gracePeriod=600 Nov 25 17:04:28 crc kubenswrapper[4812]: I1125 17:04:28.705691 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="1342f495637eb94354f1b480bb23cc055dabea0e94c3ee8c3777be3bb44ef47e" exitCode=0 Nov 25 17:04:28 crc kubenswrapper[4812]: I1125 17:04:28.705756 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"1342f495637eb94354f1b480bb23cc055dabea0e94c3ee8c3777be3bb44ef47e"} Nov 25 17:04:28 crc kubenswrapper[4812]: I1125 17:04:28.706062 4812 scope.go:117] "RemoveContainer" containerID="0916093b8e73989d7d0a8f475c7e60ef04e5cae4ae347d150e81560d1068b4c0" Nov 25 17:04:29 crc kubenswrapper[4812]: E1125 17:04:29.941356 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 25 17:04:29 crc kubenswrapper[4812]: E1125 17:04:29.941941 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-75mjh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-j4kgx_openstack(bb88081d-3888-485c-8105-d3ab11630457): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 17:04:29 crc kubenswrapper[4812]: E1125 17:04:29.943356 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-j4kgx" podUID="bb88081d-3888-485c-8105-d3ab11630457" Nov 25 17:04:30 crc kubenswrapper[4812]: E1125 17:04:30.724021 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-j4kgx" podUID="bb88081d-3888-485c-8105-d3ab11630457" Nov 25 17:04:30 crc kubenswrapper[4812]: I1125 17:04:30.934888 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 17:04:35 crc kubenswrapper[4812]: I1125 17:04:35.934916 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 
25 17:04:35 crc kubenswrapper[4812]: I1125 17:04:35.935637 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:04:40 crc kubenswrapper[4812]: I1125 17:04:40.934107 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 17:04:45 crc kubenswrapper[4812]: I1125 17:04:45.935316 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 17:04:49 crc kubenswrapper[4812]: E1125 17:04:49.170780 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 25 17:04:49 crc kubenswrapper[4812]: E1125 17:04:49.171619 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ncch689hc4h56h56fh67h4hb8h68dh665h687h5bch5fh55dh57h6dh595h5dh654h649hb4h574h589h688h66bh67bh689hc5h68dhcdh648h9fq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x8dkn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(2e50f7bc-1b06-447d-b556-6a7adc34b072): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.244699 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.330332 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-sb\") pod \"07da6a02-0fa5-4f26-be21-ab68f365c412\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.330440 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-config\") pod \"07da6a02-0fa5-4f26-be21-ab68f365c412\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.330471 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-dns-svc\") pod \"07da6a02-0fa5-4f26-be21-ab68f365c412\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.331314 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdtzz\" (UniqueName: \"kubernetes.io/projected/07da6a02-0fa5-4f26-be21-ab68f365c412-kube-api-access-hdtzz\") pod \"07da6a02-0fa5-4f26-be21-ab68f365c412\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.331408 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-nb\") pod \"07da6a02-0fa5-4f26-be21-ab68f365c412\" (UID: \"07da6a02-0fa5-4f26-be21-ab68f365c412\") " Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.338313 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07da6a02-0fa5-4f26-be21-ab68f365c412-kube-api-access-hdtzz" (OuterVolumeSpecName: "kube-api-access-hdtzz") pod "07da6a02-0fa5-4f26-be21-ab68f365c412" (UID: "07da6a02-0fa5-4f26-be21-ab68f365c412"). InnerVolumeSpecName "kube-api-access-hdtzz". 
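
Two probe outcomes are interleaved above, and they have different consequences: the machine-config-daemon liveness probe failure (GET http://127.0.0.1:8798/health refused) gets the container killed and restarted, while the dnsmasq readiness probe failures (dial tcp :5353 refused) only mark the pod unready until it is torn down. A sketch of probe definitions that would produce these checks, written with recent k8s.io/api types (which use the embedded ProbeHandler field); the path and ports come from the log lines, but the period and threshold values are assumptions:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        liveness := corev1.Probe{ // GET http://127.0.0.1:8798/health, per the log
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{Path: "/health", Port: intstr.FromInt(8798)},
            },
            PeriodSeconds:    10, // assumed
            FailureThreshold: 3,  // assumed
        }
        readiness := corev1.Probe{ // dial tcp :5353, per the probe failure output
            ProbeHandler: corev1.ProbeHandler{
                TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(5353)},
            },
        }
        fmt.Println(liveness.HTTPGet.Path, readiness.TCPSocket.Port.IntValue())
    }
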
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.414405 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "07da6a02-0fa5-4f26-be21-ab68f365c412" (UID: "07da6a02-0fa5-4f26-be21-ab68f365c412"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.434177 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdtzz\" (UniqueName: \"kubernetes.io/projected/07da6a02-0fa5-4f26-be21-ab68f365c412-kube-api-access-hdtzz\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.434219 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.453795 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "07da6a02-0fa5-4f26-be21-ab68f365c412" (UID: "07da6a02-0fa5-4f26-be21-ab68f365c412"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.458012 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-config" (OuterVolumeSpecName: "config") pod "07da6a02-0fa5-4f26-be21-ab68f365c412" (UID: "07da6a02-0fa5-4f26-be21-ab68f365c412"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.458026 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "07da6a02-0fa5-4f26-be21-ab68f365c412" (UID: "07da6a02-0fa5-4f26-be21-ab68f365c412"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.536261 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.536310 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.536319 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07da6a02-0fa5-4f26-be21-ab68f365c412-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.893928 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" event={"ID":"07da6a02-0fa5-4f26-be21-ab68f365c412","Type":"ContainerDied","Data":"6099ef8372e6eeaaf1e6f7430215f3ea4ec02689097b5b0c0e64f2fc497d4e67"} Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.894028 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-w8rxh" Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.919169 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-w8rxh"] Nov 25 17:04:49 crc kubenswrapper[4812]: I1125 17:04:49.932771 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-w8rxh"] Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.375781 4812 scope.go:117] "RemoveContainer" containerID="b2592d6694eb11c675ad251d0732050581d4d07cbe5de16460b6cc2f94cf7c46" Nov 25 17:04:50 crc kubenswrapper[4812]: E1125 17:04:50.397423 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 25 17:04:50 crc kubenswrapper[4812]: E1125 17:04:50.397872 4812 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jck2v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-xz5j9_openstack(ee5fe32b-eefd-4847-a053-b72c9f06e3b1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 
17:04:50 crc kubenswrapper[4812]: E1125 17:04:50.399090 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-xz5j9" podUID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.477003 4812 scope.go:117] "RemoveContainer" containerID="2c20c37cb7b174f00153733b5b0afffa924da1054c264659e6b9e3c913a1fa73" Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.631924 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8k7d7"] Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.904567 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j4kgx" event={"ID":"bb88081d-3888-485c-8105-d3ab11630457","Type":"ContainerStarted","Data":"319bbb71a6231da31df1197c5c410bd7a43dae2e9d7adf6316a9491e4db366ec"} Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.906422 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t5rs4" event={"ID":"3f63bc45-88e5-4074-b40d-a2741fa63339","Type":"ContainerStarted","Data":"16f8e292b902a2f39c821f6cd59071fe4f7c729ff120376c24b25bf887258729"} Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.909056 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8k7d7" event={"ID":"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7","Type":"ContainerStarted","Data":"12c65c0b0c807733b0334461386820f6982dee7654077de6ea6b529bbe33c74c"} Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.909098 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8k7d7" event={"ID":"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7","Type":"ContainerStarted","Data":"2b446887ee40b0904937f0949014cd703e1777ec7281fa9a1e824b2959359025"} Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.914645 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"f4e03a1e42f2ab4e7283089fdd598dd4009c999c3046bed0520d29498108218e"} Nov 25 17:04:50 crc kubenswrapper[4812]: E1125 17:04:50.915970 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-xz5j9" podUID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.931898 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-j4kgx" podStartSLOduration=1.364450033 podStartE2EDuration="38.931873073s" podCreationTimestamp="2025-11-25 17:04:12 +0000 UTC" firstStartedPulling="2025-11-25 17:04:12.916098536 +0000 UTC m=+1027.756240631" lastFinishedPulling="2025-11-25 17:04:50.483521576 +0000 UTC m=+1065.323663671" observedRunningTime="2025-11-25 17:04:50.923039102 +0000 UTC m=+1065.763181207" watchObservedRunningTime="2025-11-25 17:04:50.931873073 +0000 UTC m=+1065.772015168" Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.942458 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8k7d7" podStartSLOduration=30.942442683 podStartE2EDuration="30.942442683s" podCreationTimestamp="2025-11-25 
17:04:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:04:50.938015801 +0000 UTC m=+1065.778157916" watchObservedRunningTime="2025-11-25 17:04:50.942442683 +0000 UTC m=+1065.782584768" Nov 25 17:04:50 crc kubenswrapper[4812]: I1125 17:04:50.955984 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-t5rs4" podStartSLOduration=1.655416271 podStartE2EDuration="38.955962533s" podCreationTimestamp="2025-11-25 17:04:12 +0000 UTC" firstStartedPulling="2025-11-25 17:04:13.196758161 +0000 UTC m=+1028.036900246" lastFinishedPulling="2025-11-25 17:04:50.497304413 +0000 UTC m=+1065.337446508" observedRunningTime="2025-11-25 17:04:50.952839288 +0000 UTC m=+1065.792981383" watchObservedRunningTime="2025-11-25 17:04:50.955962533 +0000 UTC m=+1065.796104628" Nov 25 17:04:51 crc kubenswrapper[4812]: I1125 17:04:51.842750 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" path="/var/lib/kubelet/pods/07da6a02-0fa5-4f26-be21-ab68f365c412/volumes" Nov 25 17:04:51 crc kubenswrapper[4812]: I1125 17:04:51.930179 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerStarted","Data":"957cdd1a798ab2cc8cb5d049404b30e0168325ba9f8ec92c713bba8568e809a1"} Nov 25 17:04:53 crc kubenswrapper[4812]: I1125 17:04:53.949456 4812 generic.go:334] "Generic (PLEG): container finished" podID="b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" containerID="12c65c0b0c807733b0334461386820f6982dee7654077de6ea6b529bbe33c74c" exitCode=0 Nov 25 17:04:53 crc kubenswrapper[4812]: I1125 17:04:53.949689 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8k7d7" event={"ID":"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7","Type":"ContainerDied","Data":"12c65c0b0c807733b0334461386820f6982dee7654077de6ea6b529bbe33c74c"} Nov 25 17:04:54 crc kubenswrapper[4812]: I1125 17:04:54.959161 4812 generic.go:334] "Generic (PLEG): container finished" podID="bb88081d-3888-485c-8105-d3ab11630457" containerID="319bbb71a6231da31df1197c5c410bd7a43dae2e9d7adf6316a9491e4db366ec" exitCode=0 Nov 25 17:04:54 crc kubenswrapper[4812]: I1125 17:04:54.959236 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j4kgx" event={"ID":"bb88081d-3888-485c-8105-d3ab11630457","Type":"ContainerDied","Data":"319bbb71a6231da31df1197c5c410bd7a43dae2e9d7adf6316a9491e4db366ec"} Nov 25 17:04:54 crc kubenswrapper[4812]: I1125 17:04:54.961335 4812 generic.go:334] "Generic (PLEG): container finished" podID="3f63bc45-88e5-4074-b40d-a2741fa63339" containerID="16f8e292b902a2f39c821f6cd59071fe4f7c729ff120376c24b25bf887258729" exitCode=0 Nov 25 17:04:54 crc kubenswrapper[4812]: I1125 17:04:54.961423 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t5rs4" event={"ID":"3f63bc45-88e5-4074-b40d-a2741fa63339","Type":"ContainerDied","Data":"16f8e292b902a2f39c821f6cd59071fe4f7c729ff120376c24b25bf887258729"} Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.102146 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.158925 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-fernet-keys\") pod \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.159095 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-scripts\") pod \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.159158 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-combined-ca-bundle\") pod \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.159181 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cp2fr\" (UniqueName: \"kubernetes.io/projected/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-kube-api-access-cp2fr\") pod \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.159204 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-config-data\") pod \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.159245 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-credential-keys\") pod \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\" (UID: \"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.163983 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" (UID: "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.164754 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-kube-api-access-cp2fr" (OuterVolumeSpecName: "kube-api-access-cp2fr") pod "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" (UID: "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7"). InnerVolumeSpecName "kube-api-access-cp2fr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.168278 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" (UID: "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.171906 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-scripts" (OuterVolumeSpecName: "scripts") pod "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" (UID: "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.185916 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-config-data" (OuterVolumeSpecName: "config-data") pod "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" (UID: "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.192915 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" (UID: "b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.261640 4812 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.261851 4812 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.261860 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.261870 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.261879 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cp2fr\" (UniqueName: \"kubernetes.io/projected/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-kube-api-access-cp2fr\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.261890 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.266605 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.362590 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-combined-ca-bundle\") pod \"bb88081d-3888-485c-8105-d3ab11630457\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.362641 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb88081d-3888-485c-8105-d3ab11630457-logs\") pod \"bb88081d-3888-485c-8105-d3ab11630457\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.363294 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb88081d-3888-485c-8105-d3ab11630457-logs" (OuterVolumeSpecName: "logs") pod "bb88081d-3888-485c-8105-d3ab11630457" (UID: "bb88081d-3888-485c-8105-d3ab11630457"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.363442 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-config-data\") pod \"bb88081d-3888-485c-8105-d3ab11630457\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.363463 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75mjh\" (UniqueName: \"kubernetes.io/projected/bb88081d-3888-485c-8105-d3ab11630457-kube-api-access-75mjh\") pod \"bb88081d-3888-485c-8105-d3ab11630457\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.363752 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-scripts\") pod \"bb88081d-3888-485c-8105-d3ab11630457\" (UID: \"bb88081d-3888-485c-8105-d3ab11630457\") " Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.364820 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb88081d-3888-485c-8105-d3ab11630457-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.367751 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb88081d-3888-485c-8105-d3ab11630457-kube-api-access-75mjh" (OuterVolumeSpecName: "kube-api-access-75mjh") pod "bb88081d-3888-485c-8105-d3ab11630457" (UID: "bb88081d-3888-485c-8105-d3ab11630457"). InnerVolumeSpecName "kube-api-access-75mjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.369514 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-scripts" (OuterVolumeSpecName: "scripts") pod "bb88081d-3888-485c-8105-d3ab11630457" (UID: "bb88081d-3888-485c-8105-d3ab11630457"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.390405 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-config-data" (OuterVolumeSpecName: "config-data") pod "bb88081d-3888-485c-8105-d3ab11630457" (UID: "bb88081d-3888-485c-8105-d3ab11630457"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.392132 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb88081d-3888-485c-8105-d3ab11630457" (UID: "bb88081d-3888-485c-8105-d3ab11630457"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.465752 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.465779 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.465790 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75mjh\" (UniqueName: \"kubernetes.io/projected/bb88081d-3888-485c-8105-d3ab11630457-kube-api-access-75mjh\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.465798 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb88081d-3888-485c-8105-d3ab11630457-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.988512 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8k7d7" event={"ID":"b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7","Type":"ContainerDied","Data":"2b446887ee40b0904937f0949014cd703e1777ec7281fa9a1e824b2959359025"} Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.988876 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b446887ee40b0904937f0949014cd703e1777ec7281fa9a1e824b2959359025" Nov 25 17:04:56 crc kubenswrapper[4812]: I1125 17:04:56.988948 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8k7d7" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.001360 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.012690 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerStarted","Data":"9b0cc1fbc772f906886862761f9121665dc887ae5cbe68f38c4b4b393e63546a"} Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.014642 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-j4kgx" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.014778 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j4kgx" event={"ID":"bb88081d-3888-485c-8105-d3ab11630457","Type":"ContainerDied","Data":"a2a20fb29859a109daf3e690e6d751205bf648dec036a6545bf78f25c6f8aedc"} Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.014826 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2a20fb29859a109daf3e690e6d751205bf648dec036a6545bf78f25c6f8aedc" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.016662 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t5rs4" event={"ID":"3f63bc45-88e5-4074-b40d-a2741fa63339","Type":"ContainerDied","Data":"6041abd92dfec2d539726b5ad0607af5ce5bcb4acbb39c39cf35f0882a417745"} Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.016689 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6041abd92dfec2d539726b5ad0607af5ce5bcb4acbb39c39cf35f0882a417745" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.017166 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-t5rs4" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.078896 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-db-sync-config-data\") pod \"3f63bc45-88e5-4074-b40d-a2741fa63339\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.079050 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-combined-ca-bundle\") pod \"3f63bc45-88e5-4074-b40d-a2741fa63339\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.079105 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6868h\" (UniqueName: \"kubernetes.io/projected/3f63bc45-88e5-4074-b40d-a2741fa63339-kube-api-access-6868h\") pod \"3f63bc45-88e5-4074-b40d-a2741fa63339\" (UID: \"3f63bc45-88e5-4074-b40d-a2741fa63339\") " Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.099411 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f63bc45-88e5-4074-b40d-a2741fa63339-kube-api-access-6868h" (OuterVolumeSpecName: "kube-api-access-6868h") pod "3f63bc45-88e5-4074-b40d-a2741fa63339" (UID: "3f63bc45-88e5-4074-b40d-a2741fa63339"). InnerVolumeSpecName "kube-api-access-6868h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.105974 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f63bc45-88e5-4074-b40d-a2741fa63339" (UID: "3f63bc45-88e5-4074-b40d-a2741fa63339"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.117241 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3f63bc45-88e5-4074-b40d-a2741fa63339" (UID: "3f63bc45-88e5-4074-b40d-a2741fa63339"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.180755 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.180799 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6868h\" (UniqueName: \"kubernetes.io/projected/3f63bc45-88e5-4074-b40d-a2741fa63339-kube-api-access-6868h\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.180817 4812 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3f63bc45-88e5-4074-b40d-a2741fa63339-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.194930 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-995749db8-8znhz"] Nov 25 17:04:57 crc kubenswrapper[4812]: E1125 17:04:57.195279 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f63bc45-88e5-4074-b40d-a2741fa63339" containerName="barbican-db-sync" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195298 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f63bc45-88e5-4074-b40d-a2741fa63339" containerName="barbican-db-sync" Nov 25 17:04:57 crc kubenswrapper[4812]: E1125 17:04:57.195311 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" containerName="keystone-bootstrap" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195318 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" containerName="keystone-bootstrap" Nov 25 17:04:57 crc kubenswrapper[4812]: E1125 17:04:57.195326 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb88081d-3888-485c-8105-d3ab11630457" containerName="placement-db-sync" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195332 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb88081d-3888-485c-8105-d3ab11630457" containerName="placement-db-sync" Nov 25 17:04:57 crc kubenswrapper[4812]: E1125 17:04:57.195345 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195361 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" Nov 25 17:04:57 crc kubenswrapper[4812]: E1125 17:04:57.195386 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="init" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195392 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="init" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195546 4812 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="3f63bc45-88e5-4074-b40d-a2741fa63339" containerName="barbican-db-sync" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195563 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="07da6a02-0fa5-4f26-be21-ab68f365c412" containerName="dnsmasq-dns" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195574 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb88081d-3888-485c-8105-d3ab11630457" containerName="placement-db-sync" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.195586 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" containerName="keystone-bootstrap" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.196371 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.201890 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.202240 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.202381 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-rggnk" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.202451 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.202462 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.217758 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-995749db8-8znhz"] Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.281919 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-849d784859-xzm4f"] Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.282938 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c94a1f67-8773-4942-b09c-fb7a0401b5eb-logs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283012 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-internal-tls-certs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283038 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-public-tls-certs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283177 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283225 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv8g9\" (UniqueName: \"kubernetes.io/projected/c94a1f67-8773-4942-b09c-fb7a0401b5eb-kube-api-access-mv8g9\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283311 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-config-data\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283421 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-combined-ca-bundle\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.283474 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-scripts\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.289407 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.289566 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.289773 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.290641 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.291105 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.291400 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-s426j" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.317099 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-849d784859-xzm4f"] Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386286 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv8g9\" (UniqueName: \"kubernetes.io/projected/c94a1f67-8773-4942-b09c-fb7a0401b5eb-kube-api-access-mv8g9\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386378 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-combined-ca-bundle\") pod \"keystone-849d784859-xzm4f\" (UID: 
\"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386417 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-public-tls-certs\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386458 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-config-data\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386516 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-combined-ca-bundle\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386760 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-config-data\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386787 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-scripts\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.386822 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmpr2\" (UniqueName: \"kubernetes.io/projected/0529a3c9-9658-422d-b7ff-9db2d402716d-kube-api-access-bmpr2\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.387186 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-credential-keys\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388108 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-fernet-keys\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388184 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-internal-tls-certs\") pod \"keystone-849d784859-xzm4f\" (UID: 
\"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388454 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c94a1f67-8773-4942-b09c-fb7a0401b5eb-logs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388498 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-scripts\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388573 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-internal-tls-certs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388604 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-public-tls-certs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.388901 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c94a1f67-8773-4942-b09c-fb7a0401b5eb-logs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.393653 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-public-tls-certs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.393717 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-internal-tls-certs\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.393924 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-config-data\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.393918 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-combined-ca-bundle\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.395835 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c94a1f67-8773-4942-b09c-fb7a0401b5eb-scripts\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.408459 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv8g9\" (UniqueName: \"kubernetes.io/projected/c94a1f67-8773-4942-b09c-fb7a0401b5eb-kube-api-access-mv8g9\") pod \"placement-995749db8-8znhz\" (UID: \"c94a1f67-8773-4942-b09c-fb7a0401b5eb\") " pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.490777 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmpr2\" (UniqueName: \"kubernetes.io/projected/0529a3c9-9658-422d-b7ff-9db2d402716d-kube-api-access-bmpr2\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.490843 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-credential-keys\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.490877 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-fernet-keys\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.490905 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-internal-tls-certs\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.490984 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-scripts\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.491025 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-combined-ca-bundle\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.491043 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-public-tls-certs\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.491086 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-config-data\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.494431 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-credential-keys\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.494589 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-public-tls-certs\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.495036 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-scripts\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.495285 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-config-data\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.495810 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-combined-ca-bundle\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.496556 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-fernet-keys\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.498169 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0529a3c9-9658-422d-b7ff-9db2d402716d-internal-tls-certs\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.511259 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmpr2\" (UniqueName: \"kubernetes.io/projected/0529a3c9-9658-422d-b7ff-9db2d402716d-kube-api-access-bmpr2\") pod \"keystone-849d784859-xzm4f\" (UID: \"0529a3c9-9658-422d-b7ff-9db2d402716d\") " pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.515179 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.601872 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:57 crc kubenswrapper[4812]: I1125 17:04:57.960404 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-995749db8-8znhz"] Nov 25 17:04:57 crc kubenswrapper[4812]: W1125 17:04:57.976789 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc94a1f67_8773_4942_b09c_fb7a0401b5eb.slice/crio-16adb0529ee8100ae502ec940796f50813a1f4925044ff318da85bf70a36fdad WatchSource:0}: Error finding container 16adb0529ee8100ae502ec940796f50813a1f4925044ff318da85bf70a36fdad: Status 404 returned error can't find the container with id 16adb0529ee8100ae502ec940796f50813a1f4925044ff318da85bf70a36fdad Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.026783 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-995749db8-8znhz" event={"ID":"c94a1f67-8773-4942-b09c-fb7a0401b5eb","Type":"ContainerStarted","Data":"16adb0529ee8100ae502ec940796f50813a1f4925044ff318da85bf70a36fdad"} Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.037990 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-849d784859-xzm4f"] Nov 25 17:04:58 crc kubenswrapper[4812]: W1125 17:04:58.054180 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0529a3c9_9658_422d_b7ff_9db2d402716d.slice/crio-b6501185850f77c4259dec3f122ca018c42117f8b6dee61d543c9e13b680ab52 WatchSource:0}: Error finding container b6501185850f77c4259dec3f122ca018c42117f8b6dee61d543c9e13b680ab52: Status 404 returned error can't find the container with id b6501185850f77c4259dec3f122ca018c42117f8b6dee61d543c9e13b680ab52 Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.186855 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7fd84d955-4wnvs"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.188259 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.190184 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-n2tk2" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.190397 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.191497 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.205312 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fd84d955-4wnvs"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.217042 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5578f799d-jn7vr"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.227211 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.238195 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.260437 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5578f799d-jn7vr"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.304979 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-combined-ca-bundle\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305058 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-logs\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305086 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-config-data-custom\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305124 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-config-data\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305176 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-config-data\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305200 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-combined-ca-bundle\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305228 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzkpx\" (UniqueName: \"kubernetes.io/projected/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-kube-api-access-tzkpx\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305277 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-fp5kd\" (UniqueName: \"kubernetes.io/projected/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-kube-api-access-fp5kd\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305307 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-config-data-custom\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.305329 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-logs\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.310873 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-699df9757c-p4d84"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.312873 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.338620 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-p4d84"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407151 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp5kd\" (UniqueName: \"kubernetes.io/projected/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-kube-api-access-fp5kd\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407216 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-config-data-custom\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407244 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-config\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407270 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-logs\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407329 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-dns-svc\") pod 
\"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407396 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-combined-ca-bundle\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407421 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-sb\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407520 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-logs\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407563 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-config-data-custom\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407587 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-nb\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407616 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4jf6\" (UniqueName: \"kubernetes.io/projected/10e99add-334f-471f-a234-efcd2412a9e8-kube-api-access-w4jf6\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407649 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-config-data\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407706 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-config-data\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407734 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-combined-ca-bundle\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.407765 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzkpx\" (UniqueName: \"kubernetes.io/projected/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-kube-api-access-tzkpx\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.408949 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-logs\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.409145 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-logs\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.420762 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-combined-ca-bundle\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.426912 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-config-data\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.427219 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-config-data-custom\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.427361 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-combined-ca-bundle\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.430407 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzkpx\" (UniqueName: \"kubernetes.io/projected/ad546d1a-b6f6-4079-8dff-df34f5fe3e73-kube-api-access-tzkpx\") pod \"barbican-worker-7fd84d955-4wnvs\" (UID: \"ad546d1a-b6f6-4079-8dff-df34f5fe3e73\") " pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.432931 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-config-data-custom\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.448093 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp5kd\" (UniqueName: \"kubernetes.io/projected/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-kube-api-access-fp5kd\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.455448 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7766846cbb-5x8sl"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.458734 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b02008a-97f3-49d8-b10f-6ac065b5a0e3-config-data\") pod \"barbican-keystone-listener-5578f799d-jn7vr\" (UID: \"4b02008a-97f3-49d8-b10f-6ac065b5a0e3\") " pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.461371 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.463905 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.481819 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7766846cbb-5x8sl"] Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.509703 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-sb\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.509801 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data-custom\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.509828 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-nb\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.509861 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4jf6\" (UniqueName: \"kubernetes.io/projected/10e99add-334f-471f-a234-efcd2412a9e8-kube-api-access-w4jf6\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.509893 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.510007 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkpd8\" (UniqueName: \"kubernetes.io/projected/d1ed42ec-dde8-4eae-9618-252764bca23d-kube-api-access-nkpd8\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.510049 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-config\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.510115 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-dns-svc\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.510173 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-combined-ca-bundle\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.510197 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ed42ec-dde8-4eae-9618-252764bca23d-logs\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.511248 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-sb\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.512078 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-nb\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.514624 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-dns-svc\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.515037 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-config\") pod 
\"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.529094 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4jf6\" (UniqueName: \"kubernetes.io/projected/10e99add-334f-471f-a234-efcd2412a9e8-kube-api-access-w4jf6\") pod \"dnsmasq-dns-699df9757c-p4d84\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.612673 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data-custom\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.612732 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.612783 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkpd8\" (UniqueName: \"kubernetes.io/projected/d1ed42ec-dde8-4eae-9618-252764bca23d-kube-api-access-nkpd8\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.612854 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-combined-ca-bundle\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.612877 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ed42ec-dde8-4eae-9618-252764bca23d-logs\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.613296 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ed42ec-dde8-4eae-9618-252764bca23d-logs\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.617969 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.617997 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-combined-ca-bundle\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " 
pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.619146 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data-custom\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.633490 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkpd8\" (UniqueName: \"kubernetes.io/projected/d1ed42ec-dde8-4eae-9618-252764bca23d-kube-api-access-nkpd8\") pod \"barbican-api-7766846cbb-5x8sl\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.639274 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fd84d955-4wnvs" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.669441 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.683141 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:04:58 crc kubenswrapper[4812]: I1125 17:04:58.822191 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.057062 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-849d784859-xzm4f" event={"ID":"0529a3c9-9658-422d-b7ff-9db2d402716d","Type":"ContainerStarted","Data":"4a2fbe8937a7b74086f3d3672ff27e060052c685155179f19416a58e369b19fb"} Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.057583 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-849d784859-xzm4f" event={"ID":"0529a3c9-9658-422d-b7ff-9db2d402716d","Type":"ContainerStarted","Data":"b6501185850f77c4259dec3f122ca018c42117f8b6dee61d543c9e13b680ab52"} Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.058888 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.087865 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-849d784859-xzm4f" podStartSLOduration=2.087839383 podStartE2EDuration="2.087839383s" podCreationTimestamp="2025-11-25 17:04:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:04:59.079226907 +0000 UTC m=+1073.919369012" watchObservedRunningTime="2025-11-25 17:04:59.087839383 +0000 UTC m=+1073.927981468" Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.102813 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-995749db8-8znhz" event={"ID":"c94a1f67-8773-4942-b09c-fb7a0401b5eb","Type":"ContainerStarted","Data":"9d92c60d3c0b5ee00d4c6021d381afe250393dc738d4bd61a978129ae4e176cf"} Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.102902 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-995749db8-8znhz" 
event={"ID":"c94a1f67-8773-4942-b09c-fb7a0401b5eb","Type":"ContainerStarted","Data":"88cd3376093930f54a52807b98c96b18109482bb5992a856ec28f4e51dbcc179"} Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.104965 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.105103 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-995749db8-8znhz" Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.134663 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-995749db8-8znhz" podStartSLOduration=2.134612523 podStartE2EDuration="2.134612523s" podCreationTimestamp="2025-11-25 17:04:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:04:59.131705013 +0000 UTC m=+1073.971847108" watchObservedRunningTime="2025-11-25 17:04:59.134612523 +0000 UTC m=+1073.974754618" Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.300632 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5578f799d-jn7vr"] Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.360691 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7766846cbb-5x8sl"] Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.374794 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fd84d955-4wnvs"] Nov 25 17:04:59 crc kubenswrapper[4812]: W1125 17:04:59.382847 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad546d1a_b6f6_4079_8dff_df34f5fe3e73.slice/crio-395c71e78cbb79347e99134c5933561590acec8a3017899045779c1303158c1b WatchSource:0}: Error finding container 395c71e78cbb79347e99134c5933561590acec8a3017899045779c1303158c1b: Status 404 returned error can't find the container with id 395c71e78cbb79347e99134c5933561590acec8a3017899045779c1303158c1b Nov 25 17:04:59 crc kubenswrapper[4812]: I1125 17:04:59.469711 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-p4d84"] Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.118367 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" event={"ID":"4b02008a-97f3-49d8-b10f-6ac065b5a0e3","Type":"ContainerStarted","Data":"dbbd75c12cb1104683f7236f6937d12147173f57594f2121e695b8e90de04bdb"} Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.121022 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7766846cbb-5x8sl" event={"ID":"d1ed42ec-dde8-4eae-9618-252764bca23d","Type":"ContainerStarted","Data":"9d2ef0c382cb0987de607b58407525b5473e5b65eb8dfac7e73d27372b080525"} Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.122243 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fd84d955-4wnvs" event={"ID":"ad546d1a-b6f6-4079-8dff-df34f5fe3e73","Type":"ContainerStarted","Data":"395c71e78cbb79347e99134c5933561590acec8a3017899045779c1303158c1b"} Nov 25 17:05:00 crc kubenswrapper[4812]: W1125 17:05:00.207041 4812 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10e99add_334f_471f_a234_efcd2412a9e8.slice/crio-a531189902edc2f909801d3110ecb8ce00ae47f75be6da3980bb05ea5781df8e WatchSource:0}: Error finding container a531189902edc2f909801d3110ecb8ce00ae47f75be6da3980bb05ea5781df8e: Status 404 returned error can't find the container with id a531189902edc2f909801d3110ecb8ce00ae47f75be6da3980bb05ea5781df8e Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.526330 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-c55b7b786-smvdd"] Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.532335 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.537501 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.537664 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.544590 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-c55b7b786-smvdd"] Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653371 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98e49fd5-9d81-4492-a988-483710009e98-logs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653424 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-config-data\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653603 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-internal-tls-certs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653698 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-public-tls-certs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653717 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-combined-ca-bundle\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653735 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkfmx\" (UniqueName: 
\"kubernetes.io/projected/98e49fd5-9d81-4492-a988-483710009e98-kube-api-access-lkfmx\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.653807 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-config-data-custom\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755086 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-config-data\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755171 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-internal-tls-certs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755260 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-public-tls-certs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755282 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-combined-ca-bundle\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755311 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkfmx\" (UniqueName: \"kubernetes.io/projected/98e49fd5-9d81-4492-a988-483710009e98-kube-api-access-lkfmx\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755363 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-config-data-custom\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755394 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98e49fd5-9d81-4492-a988-483710009e98-logs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.755860 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/98e49fd5-9d81-4492-a988-483710009e98-logs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.760934 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-internal-tls-certs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.761188 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-public-tls-certs\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.761346 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-config-data-custom\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.761690 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-combined-ca-bundle\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.767053 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e49fd5-9d81-4492-a988-483710009e98-config-data\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.777455 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkfmx\" (UniqueName: \"kubernetes.io/projected/98e49fd5-9d81-4492-a988-483710009e98-kube-api-access-lkfmx\") pod \"barbican-api-c55b7b786-smvdd\" (UID: \"98e49fd5-9d81-4492-a988-483710009e98\") " pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:00 crc kubenswrapper[4812]: I1125 17:05:00.862874 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:01 crc kubenswrapper[4812]: I1125 17:05:01.130809 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7766846cbb-5x8sl" event={"ID":"d1ed42ec-dde8-4eae-9618-252764bca23d","Type":"ContainerStarted","Data":"9b2c13eb71d02f3604a231521a62f15a9121d3ba9aeb0af084eeaac920eccf6b"} Nov 25 17:05:01 crc kubenswrapper[4812]: I1125 17:05:01.132213 4812 generic.go:334] "Generic (PLEG): container finished" podID="10e99add-334f-471f-a234-efcd2412a9e8" containerID="62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4" exitCode=0 Nov 25 17:05:01 crc kubenswrapper[4812]: I1125 17:05:01.132271 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699df9757c-p4d84" event={"ID":"10e99add-334f-471f-a234-efcd2412a9e8","Type":"ContainerDied","Data":"62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4"} Nov 25 17:05:01 crc kubenswrapper[4812]: I1125 17:05:01.132300 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699df9757c-p4d84" event={"ID":"10e99add-334f-471f-a234-efcd2412a9e8","Type":"ContainerStarted","Data":"a531189902edc2f909801d3110ecb8ce00ae47f75be6da3980bb05ea5781df8e"} Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.175958 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7766846cbb-5x8sl" event={"ID":"d1ed42ec-dde8-4eae-9618-252764bca23d","Type":"ContainerStarted","Data":"0f8b2d066c37f9772f302e94c59a96d2b7e66380be7993bc1fa455b1fc8b9489"} Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.176679 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.176981 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.178031 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7766846cbb-5x8sl" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.148:9311/healthcheck\": dial tcp 10.217.0.148:9311: connect: connection refused" Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.182194 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699df9757c-p4d84" event={"ID":"10e99add-334f-471f-a234-efcd2412a9e8","Type":"ContainerStarted","Data":"3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304"} Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.182368 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.201038 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7766846cbb-5x8sl" podStartSLOduration=7.201020569 podStartE2EDuration="7.201020569s" podCreationTimestamp="2025-11-25 17:04:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:05.196107675 +0000 UTC m=+1080.036249780" watchObservedRunningTime="2025-11-25 17:05:05.201020569 +0000 UTC m=+1080.041162674" Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.218041 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-699df9757c-p4d84" podStartSLOduration=7.218023045 podStartE2EDuration="7.218023045s" podCreationTimestamp="2025-11-25 17:04:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:05.212756711 +0000 UTC m=+1080.052898806" watchObservedRunningTime="2025-11-25 17:05:05.218023045 +0000 UTC m=+1080.058165140" Nov 25 17:05:05 crc kubenswrapper[4812]: I1125 17:05:05.247141 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-c55b7b786-smvdd"] Nov 25 17:05:06 crc kubenswrapper[4812]: I1125 17:05:06.191787 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c55b7b786-smvdd" event={"ID":"98e49fd5-9d81-4492-a988-483710009e98","Type":"ContainerStarted","Data":"ae4258af5d571afdaf0655681002b1344fb4387c9a5b102052c0786ad956d3e3"} Nov 25 17:05:06 crc kubenswrapper[4812]: I1125 17:05:06.194180 4812 generic.go:334] "Generic (PLEG): container finished" podID="046e94e3-a63c-490b-8fb1-db6592742208" containerID="749d71bc9d70c64e1e650dd6acc3ee5198ff40adf1d41c141775b91a57048ead" exitCode=0 Nov 25 17:05:06 crc kubenswrapper[4812]: I1125 17:05:06.194250 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f9kvr" event={"ID":"046e94e3-a63c-490b-8fb1-db6592742208","Type":"ContainerDied","Data":"749d71bc9d70c64e1e650dd6acc3ee5198ff40adf1d41c141775b91a57048ead"} Nov 25 17:05:06 crc kubenswrapper[4812]: E1125 17:05:06.595478 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.202918 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xz5j9" event={"ID":"ee5fe32b-eefd-4847-a053-b72c9f06e3b1","Type":"ContainerStarted","Data":"16b082e6594e10bff3c6e967d7fb7d3b81f5eb29c1cb6fe7e0f42347cb6a4353"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.205352 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerStarted","Data":"54644c49a95a0d072cd8ced127bfec2ad39ff6e3eb3f8979d4a006d85198839d"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.205520 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.205545 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="sg-core" containerID="cri-o://9b0cc1fbc772f906886862761f9121665dc887ae5cbe68f38c4b4b393e63546a" gracePeriod=30 Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.205504 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="ceilometer-notification-agent" containerID="cri-o://957cdd1a798ab2cc8cb5d049404b30e0168325ba9f8ec92c713bba8568e809a1" gracePeriod=30 Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.205520 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="proxy-httpd" 
containerID="cri-o://54644c49a95a0d072cd8ced127bfec2ad39ff6e3eb3f8979d4a006d85198839d" gracePeriod=30 Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.209721 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fd84d955-4wnvs" event={"ID":"ad546d1a-b6f6-4079-8dff-df34f5fe3e73","Type":"ContainerStarted","Data":"5cee223964d0f1badf491838acce64c2c766e8adac0d7e1adb0bcc5de506df13"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.209785 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fd84d955-4wnvs" event={"ID":"ad546d1a-b6f6-4079-8dff-df34f5fe3e73","Type":"ContainerStarted","Data":"89e629d50499e6f223656761be198e0a275a8ef57e7e372137a898e3bcc5871a"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.216645 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" event={"ID":"4b02008a-97f3-49d8-b10f-6ac065b5a0e3","Type":"ContainerStarted","Data":"d4be765d042f7513ef7638d7ed634953a866d1cae825e3182a987dad8fc455de"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.216700 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" event={"ID":"4b02008a-97f3-49d8-b10f-6ac065b5a0e3","Type":"ContainerStarted","Data":"a5d429c865ff397a00c82dac8f196b9e4f8d0bba9753a1e5180eae267cdda264"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.221379 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c55b7b786-smvdd" event={"ID":"98e49fd5-9d81-4492-a988-483710009e98","Type":"ContainerStarted","Data":"829ebcfde479493f1ab97ebee8d62b1d8c129863c89716f1915128d9ea6e6908"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.221431 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c55b7b786-smvdd" event={"ID":"98e49fd5-9d81-4492-a988-483710009e98","Type":"ContainerStarted","Data":"8e96db7682adf995bbdf234c87f210f5f6eb93586b8c0dd17233a056f46d2443"} Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.221516 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.221641 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.238136 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-xz5j9" podStartSLOduration=2.935440631 podStartE2EDuration="56.238112465s" podCreationTimestamp="2025-11-25 17:04:11 +0000 UTC" firstStartedPulling="2025-11-25 17:04:12.91477077 +0000 UTC m=+1027.754912865" lastFinishedPulling="2025-11-25 17:05:06.217442604 +0000 UTC m=+1081.057584699" observedRunningTime="2025-11-25 17:05:07.236266795 +0000 UTC m=+1082.076408900" watchObservedRunningTime="2025-11-25 17:05:07.238112465 +0000 UTC m=+1082.078254560" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.282282 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7fd84d955-4wnvs" podStartSLOduration=3.2643547760000002 podStartE2EDuration="9.282255384s" podCreationTimestamp="2025-11-25 17:04:58 +0000 UTC" firstStartedPulling="2025-11-25 17:05:00.202085246 +0000 UTC m=+1075.042227351" lastFinishedPulling="2025-11-25 17:05:06.219985864 +0000 UTC m=+1081.060127959" observedRunningTime="2025-11-25 17:05:07.272494287 +0000 UTC m=+1082.112636392" 
watchObservedRunningTime="2025-11-25 17:05:07.282255384 +0000 UTC m=+1082.122397479" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.309096 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-c55b7b786-smvdd" podStartSLOduration=7.309069578 podStartE2EDuration="7.309069578s" podCreationTimestamp="2025-11-25 17:05:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:07.296916845 +0000 UTC m=+1082.137058950" watchObservedRunningTime="2025-11-25 17:05:07.309069578 +0000 UTC m=+1082.149211673" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.334864 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5578f799d-jn7vr" podStartSLOduration=2.446103179 podStartE2EDuration="9.334839254s" podCreationTimestamp="2025-11-25 17:04:58 +0000 UTC" firstStartedPulling="2025-11-25 17:04:59.328725379 +0000 UTC m=+1074.168867474" lastFinishedPulling="2025-11-25 17:05:06.217461454 +0000 UTC m=+1081.057603549" observedRunningTime="2025-11-25 17:05:07.33179463 +0000 UTC m=+1082.171936745" watchObservedRunningTime="2025-11-25 17:05:07.334839254 +0000 UTC m=+1082.174981349" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.654918 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.791269 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp27d\" (UniqueName: \"kubernetes.io/projected/046e94e3-a63c-490b-8fb1-db6592742208-kube-api-access-tp27d\") pod \"046e94e3-a63c-490b-8fb1-db6592742208\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.791378 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-combined-ca-bundle\") pod \"046e94e3-a63c-490b-8fb1-db6592742208\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.791562 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-config\") pod \"046e94e3-a63c-490b-8fb1-db6592742208\" (UID: \"046e94e3-a63c-490b-8fb1-db6592742208\") " Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.797449 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/046e94e3-a63c-490b-8fb1-db6592742208-kube-api-access-tp27d" (OuterVolumeSpecName: "kube-api-access-tp27d") pod "046e94e3-a63c-490b-8fb1-db6592742208" (UID: "046e94e3-a63c-490b-8fb1-db6592742208"). InnerVolumeSpecName "kube-api-access-tp27d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.829349 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-config" (OuterVolumeSpecName: "config") pod "046e94e3-a63c-490b-8fb1-db6592742208" (UID: "046e94e3-a63c-490b-8fb1-db6592742208"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.829819 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "046e94e3-a63c-490b-8fb1-db6592742208" (UID: "046e94e3-a63c-490b-8fb1-db6592742208"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.885332 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.894918 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp27d\" (UniqueName: \"kubernetes.io/projected/046e94e3-a63c-490b-8fb1-db6592742208-kube-api-access-tp27d\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.895300 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:07 crc kubenswrapper[4812]: I1125 17:05:07.895359 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/046e94e3-a63c-490b-8fb1-db6592742208-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.235656 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-f9kvr" event={"ID":"046e94e3-a63c-490b-8fb1-db6592742208","Type":"ContainerDied","Data":"c42f7d1e55220be370b3201d335f1e65f0c46b61221d5a6b2781837b7b7762b5"} Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.235709 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c42f7d1e55220be370b3201d335f1e65f0c46b61221d5a6b2781837b7b7762b5" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.235736 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-f9kvr" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.239463 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerID="54644c49a95a0d072cd8ced127bfec2ad39ff6e3eb3f8979d4a006d85198839d" exitCode=0 Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.239497 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerID="9b0cc1fbc772f906886862761f9121665dc887ae5cbe68f38c4b4b393e63546a" exitCode=2 Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.240338 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerDied","Data":"54644c49a95a0d072cd8ced127bfec2ad39ff6e3eb3f8979d4a006d85198839d"} Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.240378 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerDied","Data":"9b0cc1fbc772f906886862761f9121665dc887ae5cbe68f38c4b4b393e63546a"} Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.445080 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-p4d84"] Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.445397 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-699df9757c-p4d84" podUID="10e99add-334f-471f-a234-efcd2412a9e8" containerName="dnsmasq-dns" containerID="cri-o://3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304" gracePeriod=10 Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.519201 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-q79bp"] Nov 25 17:05:08 crc kubenswrapper[4812]: E1125 17:05:08.519959 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="046e94e3-a63c-490b-8fb1-db6592742208" containerName="neutron-db-sync" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.519975 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="046e94e3-a63c-490b-8fb1-db6592742208" containerName="neutron-db-sync" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.520252 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="046e94e3-a63c-490b-8fb1-db6592742208" containerName="neutron-db-sync" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.521438 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.552713 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-q79bp"] Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.633843 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6c8d9b5d8d-dhthn"] Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.641082 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.647614 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.647865 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.649081 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-n4fjw" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.649965 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.659198 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6c8d9b5d8d-dhthn"] Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.712769 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.712830 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.712952 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-config\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.712977 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-dns-svc\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.713013 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9cp9\" (UniqueName: \"kubernetes.io/projected/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-kube-api-access-n9cp9\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814729 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814780 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-config\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814860 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-httpd-config\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814891 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-config\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814906 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-dns-svc\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814930 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9cp9\" (UniqueName: \"kubernetes.io/projected/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-kube-api-access-n9cp9\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814969 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-combined-ca-bundle\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.814997 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-ovndb-tls-certs\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.815044 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9bhc\" (UniqueName: \"kubernetes.io/projected/aff51d33-8e62-4c99-bc89-c6e53270b60c-kube-api-access-f9bhc\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.815079 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.816549 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-config\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.817314 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-dns-svc\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.817611 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.818514 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.850988 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9cp9\" (UniqueName: \"kubernetes.io/projected/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-kube-api-access-n9cp9\") pod \"dnsmasq-dns-6bb684768f-q79bp\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.917610 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-config\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.917765 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-httpd-config\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.917862 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-combined-ca-bundle\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.917940 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-ovndb-tls-certs\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.918049 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9bhc\" (UniqueName: \"kubernetes.io/projected/aff51d33-8e62-4c99-bc89-c6e53270b60c-kube-api-access-f9bhc\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: 
\"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.983247 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-httpd-config\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.983258 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-ovndb-tls-certs\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.983383 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-config\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.983577 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9bhc\" (UniqueName: \"kubernetes.io/projected/aff51d33-8e62-4c99-bc89-c6e53270b60c-kube-api-access-f9bhc\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:08 crc kubenswrapper[4812]: I1125 17:05:08.991695 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-combined-ca-bundle\") pod \"neutron-6c8d9b5d8d-dhthn\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.029003 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.139292 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.222094 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4jf6\" (UniqueName: \"kubernetes.io/projected/10e99add-334f-471f-a234-efcd2412a9e8-kube-api-access-w4jf6\") pod \"10e99add-334f-471f-a234-efcd2412a9e8\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.222351 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-dns-svc\") pod \"10e99add-334f-471f-a234-efcd2412a9e8\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.222410 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-sb\") pod \"10e99add-334f-471f-a234-efcd2412a9e8\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.222488 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-config\") pod \"10e99add-334f-471f-a234-efcd2412a9e8\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.222596 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-nb\") pod \"10e99add-334f-471f-a234-efcd2412a9e8\" (UID: \"10e99add-334f-471f-a234-efcd2412a9e8\") " Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.259483 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10e99add-334f-471f-a234-efcd2412a9e8-kube-api-access-w4jf6" (OuterVolumeSpecName: "kube-api-access-w4jf6") pod "10e99add-334f-471f-a234-efcd2412a9e8" (UID: "10e99add-334f-471f-a234-efcd2412a9e8"). InnerVolumeSpecName "kube-api-access-w4jf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.268114 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.279656 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-p4d84" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.280619 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699df9757c-p4d84" event={"ID":"10e99add-334f-471f-a234-efcd2412a9e8","Type":"ContainerDied","Data":"3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304"} Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.280697 4812 scope.go:117] "RemoveContainer" containerID="3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.280897 4812 generic.go:334] "Generic (PLEG): container finished" podID="10e99add-334f-471f-a234-efcd2412a9e8" containerID="3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304" exitCode=0 Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.281196 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699df9757c-p4d84" event={"ID":"10e99add-334f-471f-a234-efcd2412a9e8","Type":"ContainerDied","Data":"a531189902edc2f909801d3110ecb8ce00ae47f75be6da3980bb05ea5781df8e"} Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.284373 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "10e99add-334f-471f-a234-efcd2412a9e8" (UID: "10e99add-334f-471f-a234-efcd2412a9e8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.295919 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "10e99add-334f-471f-a234-efcd2412a9e8" (UID: "10e99add-334f-471f-a234-efcd2412a9e8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.297879 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "10e99add-334f-471f-a234-efcd2412a9e8" (UID: "10e99add-334f-471f-a234-efcd2412a9e8"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.325231 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4jf6\" (UniqueName: \"kubernetes.io/projected/10e99add-334f-471f-a234-efcd2412a9e8-kube-api-access-w4jf6\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.325266 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.325277 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.325285 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.389989 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-config" (OuterVolumeSpecName: "config") pod "10e99add-334f-471f-a234-efcd2412a9e8" (UID: "10e99add-334f-471f-a234-efcd2412a9e8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.411251 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-q79bp"] Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.426398 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e99add-334f-471f-a234-efcd2412a9e8-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.448885 4812 scope.go:117] "RemoveContainer" containerID="62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.498059 4812 scope.go:117] "RemoveContainer" containerID="3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304" Nov 25 17:05:09 crc kubenswrapper[4812]: E1125 17:05:09.498908 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304\": container with ID starting with 3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304 not found: ID does not exist" containerID="3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.498946 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304"} err="failed to get container status \"3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304\": rpc error: code = NotFound desc = could not find container \"3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304\": container with ID starting with 3d8da70f6fb53b14c832d83d17d2a1ad9ed221c1dd5c8a1599915fa1fd468304 not found: ID does not exist" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.498973 4812 scope.go:117] "RemoveContainer" 
containerID="62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4" Nov 25 17:05:09 crc kubenswrapper[4812]: E1125 17:05:09.500216 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4\": container with ID starting with 62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4 not found: ID does not exist" containerID="62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.500252 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4"} err="failed to get container status \"62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4\": rpc error: code = NotFound desc = could not find container \"62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4\": container with ID starting with 62ae12ee4fcda9363589415cf3f390f51be2955e9de9297e90ae37255ee9b9a4 not found: ID does not exist" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.617875 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-p4d84"] Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.626727 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-p4d84"] Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.841317 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10e99add-334f-471f-a234-efcd2412a9e8" path="/var/lib/kubelet/pods/10e99add-334f-471f-a234-efcd2412a9e8/volumes" Nov 25 17:05:09 crc kubenswrapper[4812]: I1125 17:05:09.915183 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6c8d9b5d8d-dhthn"] Nov 25 17:05:10 crc kubenswrapper[4812]: W1125 17:05:10.080793 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaff51d33_8e62_4c99_bc89_c6e53270b60c.slice/crio-d72788c9a5e209d04c56729463e5a62c826307372f233553eb06b6639d03bca0 WatchSource:0}: Error finding container d72788c9a5e209d04c56729463e5a62c826307372f233553eb06b6639d03bca0: Status 404 returned error can't find the container with id d72788c9a5e209d04c56729463e5a62c826307372f233553eb06b6639d03bca0 Nov 25 17:05:10 crc kubenswrapper[4812]: I1125 17:05:10.289979 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c8d9b5d8d-dhthn" event={"ID":"aff51d33-8e62-4c99-bc89-c6e53270b60c","Type":"ContainerStarted","Data":"d72788c9a5e209d04c56729463e5a62c826307372f233553eb06b6639d03bca0"} Nov 25 17:05:10 crc kubenswrapper[4812]: I1125 17:05:10.292336 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" event={"ID":"6c9583db-6c4d-4ab8-b076-5f01d3f40f30","Type":"ContainerStarted","Data":"38c23bd134f029f3d881d7ecbd923dbd4cb208795c44e512c4b77c0a4e8744a5"} Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.158282 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7449f79865-5fw9f"] Nov 25 17:05:11 crc kubenswrapper[4812]: E1125 17:05:11.158963 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e99add-334f-471f-a234-efcd2412a9e8" containerName="init" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.158983 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e99add-334f-471f-a234-efcd2412a9e8" 
containerName="init" Nov 25 17:05:11 crc kubenswrapper[4812]: E1125 17:05:11.158994 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e99add-334f-471f-a234-efcd2412a9e8" containerName="dnsmasq-dns" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.159000 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e99add-334f-471f-a234-efcd2412a9e8" containerName="dnsmasq-dns" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.159238 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="10e99add-334f-471f-a234-efcd2412a9e8" containerName="dnsmasq-dns" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.160183 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.165200 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.165777 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.177066 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7449f79865-5fw9f"] Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.306614 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerID="957cdd1a798ab2cc8cb5d049404b30e0168325ba9f8ec92c713bba8568e809a1" exitCode=0 Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.306687 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerDied","Data":"957cdd1a798ab2cc8cb5d049404b30e0168325ba9f8ec92c713bba8568e809a1"} Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.361903 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-internal-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.361985 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-config\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.362052 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-ovndb-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.362078 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-httpd-config\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.362374 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-247bc\" (UniqueName: \"kubernetes.io/projected/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-kube-api-access-247bc\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.362445 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-combined-ca-bundle\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.362676 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-public-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.463890 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-httpd-config\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.464009 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-247bc\" (UniqueName: \"kubernetes.io/projected/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-kube-api-access-247bc\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.464044 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-combined-ca-bundle\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.464100 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-public-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.464131 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-internal-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.464157 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-config\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.464203 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-ovndb-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.470393 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-public-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.470422 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-internal-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.470804 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-config\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.475014 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-ovndb-tls-certs\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.481576 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-combined-ca-bundle\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.487592 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-247bc\" (UniqueName: \"kubernetes.io/projected/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-kube-api-access-247bc\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.490262 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85-httpd-config\") pod \"neutron-7449f79865-5fw9f\" (UID: \"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85\") " pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:11 crc kubenswrapper[4812]: I1125 17:05:11.778875 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:12 crc kubenswrapper[4812]: W1125 17:05:12.420707 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5dbf4cb_9b43_4e1c_a420_0f55d4f94b85.slice/crio-e21625c0a709c592b67d68ffc6e5b21ed9885a853a26f7b09e7d53e7a44f1118 WatchSource:0}: Error finding container e21625c0a709c592b67d68ffc6e5b21ed9885a853a26f7b09e7d53e7a44f1118: Status 404 returned error can't find the container with id e21625c0a709c592b67d68ffc6e5b21ed9885a853a26f7b09e7d53e7a44f1118 Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.422946 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7449f79865-5fw9f"] Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.824065 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988135 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-scripts\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988186 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-config-data\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988227 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-log-httpd\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988401 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-combined-ca-bundle\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988468 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8dkn\" (UniqueName: \"kubernetes.io/projected/2e50f7bc-1b06-447d-b556-6a7adc34b072-kube-api-access-x8dkn\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988499 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-sg-core-conf-yaml\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988545 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-run-httpd\") pod \"2e50f7bc-1b06-447d-b556-6a7adc34b072\" (UID: \"2e50f7bc-1b06-447d-b556-6a7adc34b072\") " Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988732 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.988911 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.989057 4812 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.989081 4812 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2e50f7bc-1b06-447d-b556-6a7adc34b072-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.993078 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-scripts" (OuterVolumeSpecName: "scripts") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:12 crc kubenswrapper[4812]: I1125 17:05:12.998708 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e50f7bc-1b06-447d-b556-6a7adc34b072-kube-api-access-x8dkn" (OuterVolumeSpecName: "kube-api-access-x8dkn") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "kube-api-access-x8dkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.011147 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.037790 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.063712 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-config-data" (OuterVolumeSpecName: "config-data") pod "2e50f7bc-1b06-447d-b556-6a7adc34b072" (UID: "2e50f7bc-1b06-447d-b556-6a7adc34b072"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.090481 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.090510 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8dkn\" (UniqueName: \"kubernetes.io/projected/2e50f7bc-1b06-447d-b556-6a7adc34b072-kube-api-access-x8dkn\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.090521 4812 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.090541 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.090549 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e50f7bc-1b06-447d-b556-6a7adc34b072-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.326170 4812 generic.go:334] "Generic (PLEG): container finished" podID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerID="68508a893204d94021ab681d245ec2b9e3d72cf27a6d7c2d48d5a05d45600267" exitCode=0 Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.326271 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" event={"ID":"6c9583db-6c4d-4ab8-b076-5f01d3f40f30","Type":"ContainerDied","Data":"68508a893204d94021ab681d245ec2b9e3d72cf27a6d7c2d48d5a05d45600267"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.331595 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7449f79865-5fw9f" event={"ID":"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85","Type":"ContainerStarted","Data":"f875c7b269bb1b2d0d33e9b3450bfac77fadd501a29228ac8f1e78a024921f36"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.331633 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7449f79865-5fw9f" event={"ID":"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85","Type":"ContainerStarted","Data":"cadb0d081b840b276cd6b6957742cb4015df8c3f6efafd1ca371caf168a3249a"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.331643 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7449f79865-5fw9f" event={"ID":"b5dbf4cb-9b43-4e1c-a420-0f55d4f94b85","Type":"ContainerStarted","Data":"e21625c0a709c592b67d68ffc6e5b21ed9885a853a26f7b09e7d53e7a44f1118"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.332307 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.336080 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c8d9b5d8d-dhthn" event={"ID":"aff51d33-8e62-4c99-bc89-c6e53270b60c","Type":"ContainerStarted","Data":"abda29774ee0b1e64e94d61d01013714faa29d1bf9119b37ef10ca17ac6386ba"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.336164 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c8d9b5d8d-dhthn" 
event={"ID":"aff51d33-8e62-4c99-bc89-c6e53270b60c","Type":"ContainerStarted","Data":"a3df4db6ec8e762e8d7d2749d9960042f5364cd1bd5885a4b434c0d687223402"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.336684 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.340205 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2e50f7bc-1b06-447d-b556-6a7adc34b072","Type":"ContainerDied","Data":"a326aa7548140618ef18fd4e891d946427252442b97a9884c7425a18bce73ae2"} Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.340251 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.340278 4812 scope.go:117] "RemoveContainer" containerID="54644c49a95a0d072cd8ced127bfec2ad39ff6e3eb3f8979d4a006d85198839d" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.380314 4812 scope.go:117] "RemoveContainer" containerID="9b0cc1fbc772f906886862761f9121665dc887ae5cbe68f38c4b4b393e63546a" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.398812 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7449f79865-5fw9f" podStartSLOduration=2.398796264 podStartE2EDuration="2.398796264s" podCreationTimestamp="2025-11-25 17:05:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:13.369998436 +0000 UTC m=+1088.210140541" watchObservedRunningTime="2025-11-25 17:05:13.398796264 +0000 UTC m=+1088.238938349" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.433388 4812 scope.go:117] "RemoveContainer" containerID="957cdd1a798ab2cc8cb5d049404b30e0168325ba9f8ec92c713bba8568e809a1" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.446268 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6c8d9b5d8d-dhthn" podStartSLOduration=5.446238783 podStartE2EDuration="5.446238783s" podCreationTimestamp="2025-11-25 17:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:13.389255462 +0000 UTC m=+1088.229397577" watchObservedRunningTime="2025-11-25 17:05:13.446238783 +0000 UTC m=+1088.286380878" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.477481 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.491633 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.501591 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:13 crc kubenswrapper[4812]: E1125 17:05:13.502046 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="ceilometer-notification-agent" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.502071 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="ceilometer-notification-agent" Nov 25 17:05:13 crc kubenswrapper[4812]: E1125 17:05:13.502085 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="sg-core" Nov 25 17:05:13 crc 
kubenswrapper[4812]: I1125 17:05:13.502093 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="sg-core" Nov 25 17:05:13 crc kubenswrapper[4812]: E1125 17:05:13.502130 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="proxy-httpd" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.502139 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="proxy-httpd" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.502327 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="ceilometer-notification-agent" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.502353 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="sg-core" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.502391 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" containerName="proxy-httpd" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.504182 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.507137 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.508091 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.510563 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.510618 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-run-httpd\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.510645 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-log-httpd\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.510748 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.510825 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-scripts\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: 
I1125 17:05:13.510889 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n6zv\" (UniqueName: \"kubernetes.io/projected/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-kube-api-access-2n6zv\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.510978 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-config-data\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.512735 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.612710 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-config-data\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.612781 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.612816 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-run-httpd\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.612846 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-log-httpd\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.612903 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.612972 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-scripts\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.613074 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n6zv\" (UniqueName: \"kubernetes.io/projected/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-kube-api-access-2n6zv\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.613500 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-run-httpd\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.614203 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-log-httpd\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.619286 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-scripts\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.625146 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.627742 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-config-data\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.634084 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.642466 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n6zv\" (UniqueName: \"kubernetes.io/projected/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-kube-api-access-2n6zv\") pod \"ceilometer-0\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " pod="openstack/ceilometer-0" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.845005 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e50f7bc-1b06-447d-b556-6a7adc34b072" path="/var/lib/kubelet/pods/2e50f7bc-1b06-447d-b556-6a7adc34b072/volumes" Nov 25 17:05:13 crc kubenswrapper[4812]: I1125 17:05:13.911867 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:14 crc kubenswrapper[4812]: I1125 17:05:14.347998 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:14 crc kubenswrapper[4812]: I1125 17:05:14.359347 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" event={"ID":"6c9583db-6c4d-4ab8-b076-5f01d3f40f30","Type":"ContainerStarted","Data":"55791fad9db2664f7bac5119aeda9388ffda8c6a4b4e792e24a6861ec84d00d3"} Nov 25 17:05:14 crc kubenswrapper[4812]: I1125 17:05:14.359502 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:15 crc kubenswrapper[4812]: I1125 17:05:15.371763 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerStarted","Data":"1fcf02c0d6167ad60dd2e9ef9be9ac6b680dea7e1d910f100c1cead99f803ca3"} Nov 25 17:05:15 crc kubenswrapper[4812]: I1125 17:05:15.466495 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:05:15 crc kubenswrapper[4812]: I1125 17:05:15.494877 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" podStartSLOduration=7.494853114 podStartE2EDuration="7.494853114s" podCreationTimestamp="2025-11-25 17:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:14.407860117 +0000 UTC m=+1089.248002232" watchObservedRunningTime="2025-11-25 17:05:15.494853114 +0000 UTC m=+1090.334995209" Nov 25 17:05:16 crc kubenswrapper[4812]: I1125 17:05:16.394439 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerStarted","Data":"80dfeb84ca827cd19f2bdee18aecb4ad1b32c4039d1200d483d4478467326750"} Nov 25 17:05:17 crc kubenswrapper[4812]: I1125 17:05:17.799648 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:17 crc kubenswrapper[4812]: I1125 17:05:17.800631 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-c55b7b786-smvdd" Nov 25 17:05:17 crc kubenswrapper[4812]: I1125 17:05:17.869621 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7766846cbb-5x8sl"] Nov 25 17:05:17 crc kubenswrapper[4812]: I1125 17:05:17.869880 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7766846cbb-5x8sl" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api-log" containerID="cri-o://9b2c13eb71d02f3604a231521a62f15a9121d3ba9aeb0af084eeaac920eccf6b" gracePeriod=30 Nov 25 17:05:17 crc kubenswrapper[4812]: I1125 17:05:17.870296 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7766846cbb-5x8sl" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api" containerID="cri-o://0f8b2d066c37f9772f302e94c59a96d2b7e66380be7993bc1fa455b1fc8b9489" gracePeriod=30 Nov 25 17:05:18 crc kubenswrapper[4812]: I1125 17:05:18.410473 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerStarted","Data":"59b3568160a9648030cb271bf431816764b7a5f8ebbff9c08a6dadcbad303a3c"} Nov 25 17:05:18 crc kubenswrapper[4812]: I1125 17:05:18.412134 4812 generic.go:334] "Generic (PLEG): container finished" podID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerID="9b2c13eb71d02f3604a231521a62f15a9121d3ba9aeb0af084eeaac920eccf6b" exitCode=143 Nov 25 17:05:18 crc kubenswrapper[4812]: I1125 17:05:18.412169 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7766846cbb-5x8sl" event={"ID":"d1ed42ec-dde8-4eae-9618-252764bca23d","Type":"ContainerDied","Data":"9b2c13eb71d02f3604a231521a62f15a9121d3ba9aeb0af084eeaac920eccf6b"} Nov 25 17:05:19 crc kubenswrapper[4812]: I1125 17:05:19.141725 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:19 crc kubenswrapper[4812]: I1125 17:05:19.198754 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-vhnzb"] Nov 25 17:05:19 crc kubenswrapper[4812]: I1125 17:05:19.199035 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerName="dnsmasq-dns" containerID="cri-o://72d8a6e1e56a23074b759106bb7cbf2b3c5aaca0a8ba664db713913fee6d05db" gracePeriod=10 Nov 25 17:05:19 crc kubenswrapper[4812]: I1125 17:05:19.423711 4812 generic.go:334] "Generic (PLEG): container finished" podID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerID="72d8a6e1e56a23074b759106bb7cbf2b3c5aaca0a8ba664db713913fee6d05db" exitCode=0 Nov 25 17:05:19 crc kubenswrapper[4812]: I1125 17:05:19.423763 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" event={"ID":"6982125a-a01a-4eed-a7ce-e335ef73e14d","Type":"ContainerDied","Data":"72d8a6e1e56a23074b759106bb7cbf2b3c5aaca0a8ba664db713913fee6d05db"} Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.130459 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.155250 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-nb\") pod \"6982125a-a01a-4eed-a7ce-e335ef73e14d\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.155346 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-dns-svc\") pod \"6982125a-a01a-4eed-a7ce-e335ef73e14d\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.155481 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-config\") pod \"6982125a-a01a-4eed-a7ce-e335ef73e14d\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.155563 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-sb\") pod \"6982125a-a01a-4eed-a7ce-e335ef73e14d\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.155703 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxxp5\" (UniqueName: \"kubernetes.io/projected/6982125a-a01a-4eed-a7ce-e335ef73e14d-kube-api-access-gxxp5\") pod \"6982125a-a01a-4eed-a7ce-e335ef73e14d\" (UID: \"6982125a-a01a-4eed-a7ce-e335ef73e14d\") " Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.162283 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6982125a-a01a-4eed-a7ce-e335ef73e14d-kube-api-access-gxxp5" (OuterVolumeSpecName: "kube-api-access-gxxp5") pod "6982125a-a01a-4eed-a7ce-e335ef73e14d" (UID: "6982125a-a01a-4eed-a7ce-e335ef73e14d"). InnerVolumeSpecName "kube-api-access-gxxp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.210198 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6982125a-a01a-4eed-a7ce-e335ef73e14d" (UID: "6982125a-a01a-4eed-a7ce-e335ef73e14d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.221937 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6982125a-a01a-4eed-a7ce-e335ef73e14d" (UID: "6982125a-a01a-4eed-a7ce-e335ef73e14d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.227015 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6982125a-a01a-4eed-a7ce-e335ef73e14d" (UID: "6982125a-a01a-4eed-a7ce-e335ef73e14d"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.229043 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-config" (OuterVolumeSpecName: "config") pod "6982125a-a01a-4eed-a7ce-e335ef73e14d" (UID: "6982125a-a01a-4eed-a7ce-e335ef73e14d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.258570 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.258632 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.258644 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.258661 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxxp5\" (UniqueName: \"kubernetes.io/projected/6982125a-a01a-4eed-a7ce-e335ef73e14d-kube-api-access-gxxp5\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.258672 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6982125a-a01a-4eed-a7ce-e335ef73e14d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.434097 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" event={"ID":"6982125a-a01a-4eed-a7ce-e335ef73e14d","Type":"ContainerDied","Data":"81d027a07c3770f4c82c653687e7f164bec57fb90334d4f9960a1afce92a265a"} Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.434126 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-vhnzb" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.434222 4812 scope.go:117] "RemoveContainer" containerID="72d8a6e1e56a23074b759106bb7cbf2b3c5aaca0a8ba664db713913fee6d05db" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.438952 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerStarted","Data":"406148fd979e443955257808ae32b0c8fb09c037f1077e0f4f8e629c8e6ae834"} Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.481915 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-vhnzb"] Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.488631 4812 scope.go:117] "RemoveContainer" containerID="03477f5e749b2090b26de554b708d6bdb4d61ebf9a12cfcda20fec08dafb2722" Nov 25 17:05:20 crc kubenswrapper[4812]: I1125 17:05:20.489027 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-vhnzb"] Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.009472 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7766846cbb-5x8sl" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.148:9311/healthcheck\": read tcp 10.217.0.2:50196->10.217.0.148:9311: read: connection reset by peer" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.009519 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7766846cbb-5x8sl" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.148:9311/healthcheck\": read tcp 10.217.0.2:50206->10.217.0.148:9311: read: connection reset by peer" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.453735 4812 generic.go:334] "Generic (PLEG): container finished" podID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerID="0f8b2d066c37f9772f302e94c59a96d2b7e66380be7993bc1fa455b1fc8b9489" exitCode=0 Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.453814 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7766846cbb-5x8sl" event={"ID":"d1ed42ec-dde8-4eae-9618-252764bca23d","Type":"ContainerDied","Data":"0f8b2d066c37f9772f302e94c59a96d2b7e66380be7993bc1fa455b1fc8b9489"} Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.454115 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7766846cbb-5x8sl" event={"ID":"d1ed42ec-dde8-4eae-9618-252764bca23d","Type":"ContainerDied","Data":"9d2ef0c382cb0987de607b58407525b5473e5b65eb8dfac7e73d27372b080525"} Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.454135 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d2ef0c382cb0987de607b58407525b5473e5b65eb8dfac7e73d27372b080525" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.459463 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.482380 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data-custom\") pod \"d1ed42ec-dde8-4eae-9618-252764bca23d\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.482677 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkpd8\" (UniqueName: \"kubernetes.io/projected/d1ed42ec-dde8-4eae-9618-252764bca23d-kube-api-access-nkpd8\") pod \"d1ed42ec-dde8-4eae-9618-252764bca23d\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.482794 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ed42ec-dde8-4eae-9618-252764bca23d-logs\") pod \"d1ed42ec-dde8-4eae-9618-252764bca23d\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.482926 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data\") pod \"d1ed42ec-dde8-4eae-9618-252764bca23d\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.483066 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-combined-ca-bundle\") pod \"d1ed42ec-dde8-4eae-9618-252764bca23d\" (UID: \"d1ed42ec-dde8-4eae-9618-252764bca23d\") " Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.486135 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1ed42ec-dde8-4eae-9618-252764bca23d-logs" (OuterVolumeSpecName: "logs") pod "d1ed42ec-dde8-4eae-9618-252764bca23d" (UID: "d1ed42ec-dde8-4eae-9618-252764bca23d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.490722 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1ed42ec-dde8-4eae-9618-252764bca23d-kube-api-access-nkpd8" (OuterVolumeSpecName: "kube-api-access-nkpd8") pod "d1ed42ec-dde8-4eae-9618-252764bca23d" (UID: "d1ed42ec-dde8-4eae-9618-252764bca23d"). InnerVolumeSpecName "kube-api-access-nkpd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.501057 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d1ed42ec-dde8-4eae-9618-252764bca23d" (UID: "d1ed42ec-dde8-4eae-9618-252764bca23d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.537678 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1ed42ec-dde8-4eae-9618-252764bca23d" (UID: "d1ed42ec-dde8-4eae-9618-252764bca23d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.549735 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data" (OuterVolumeSpecName: "config-data") pod "d1ed42ec-dde8-4eae-9618-252764bca23d" (UID: "d1ed42ec-dde8-4eae-9618-252764bca23d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.586364 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.586421 4812 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.586436 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkpd8\" (UniqueName: \"kubernetes.io/projected/d1ed42ec-dde8-4eae-9618-252764bca23d-kube-api-access-nkpd8\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.586453 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ed42ec-dde8-4eae-9618-252764bca23d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.586465 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ed42ec-dde8-4eae-9618-252764bca23d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:21 crc kubenswrapper[4812]: I1125 17:05:21.840734 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" path="/var/lib/kubelet/pods/6982125a-a01a-4eed-a7ce-e335ef73e14d/volumes" Nov 25 17:05:22 crc kubenswrapper[4812]: I1125 17:05:22.467231 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7766846cbb-5x8sl" Nov 25 17:05:22 crc kubenswrapper[4812]: I1125 17:05:22.467262 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerStarted","Data":"410f288b8d4db28b8d6f4dafb693da59d1ef5c8429f207007b9ceded74b4dafe"} Nov 25 17:05:22 crc kubenswrapper[4812]: I1125 17:05:22.467697 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 17:05:22 crc kubenswrapper[4812]: I1125 17:05:22.494605 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.673857256 podStartE2EDuration="9.49458604s" podCreationTimestamp="2025-11-25 17:05:13 +0000 UTC" firstStartedPulling="2025-11-25 17:05:14.358758763 +0000 UTC m=+1089.198900858" lastFinishedPulling="2025-11-25 17:05:21.179487547 +0000 UTC m=+1096.019629642" observedRunningTime="2025-11-25 17:05:22.489754858 +0000 UTC m=+1097.329896963" watchObservedRunningTime="2025-11-25 17:05:22.49458604 +0000 UTC m=+1097.334728135" Nov 25 17:05:22 crc kubenswrapper[4812]: I1125 17:05:22.515201 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7766846cbb-5x8sl"] Nov 25 17:05:22 crc kubenswrapper[4812]: I1125 17:05:22.521743 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7766846cbb-5x8sl"] Nov 25 17:05:23 crc kubenswrapper[4812]: I1125 17:05:23.479212 4812 generic.go:334] "Generic (PLEG): container finished" podID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" containerID="16b082e6594e10bff3c6e967d7fb7d3b81f5eb29c1cb6fe7e0f42347cb6a4353" exitCode=0 Nov 25 17:05:23 crc kubenswrapper[4812]: I1125 17:05:23.479323 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xz5j9" event={"ID":"ee5fe32b-eefd-4847-a053-b72c9f06e3b1","Type":"ContainerDied","Data":"16b082e6594e10bff3c6e967d7fb7d3b81f5eb29c1cb6fe7e0f42347cb6a4353"} Nov 25 17:05:23 crc kubenswrapper[4812]: I1125 17:05:23.856336 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" path="/var/lib/kubelet/pods/d1ed42ec-dde8-4eae-9618-252764bca23d/volumes" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.822065 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.868569 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jck2v\" (UniqueName: \"kubernetes.io/projected/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-kube-api-access-jck2v\") pod \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.868643 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-db-sync-config-data\") pod \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.868836 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-scripts\") pod \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.868888 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-config-data\") pod \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.868972 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-etc-machine-id\") pod \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.868997 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-combined-ca-bundle\") pod \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\" (UID: \"ee5fe32b-eefd-4847-a053-b72c9f06e3b1\") " Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.872961 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ee5fe32b-eefd-4847-a053-b72c9f06e3b1" (UID: "ee5fe32b-eefd-4847-a053-b72c9f06e3b1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.874108 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-kube-api-access-jck2v" (OuterVolumeSpecName: "kube-api-access-jck2v") pod "ee5fe32b-eefd-4847-a053-b72c9f06e3b1" (UID: "ee5fe32b-eefd-4847-a053-b72c9f06e3b1"). InnerVolumeSpecName "kube-api-access-jck2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.874854 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-scripts" (OuterVolumeSpecName: "scripts") pod "ee5fe32b-eefd-4847-a053-b72c9f06e3b1" (UID: "ee5fe32b-eefd-4847-a053-b72c9f06e3b1"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.877234 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ee5fe32b-eefd-4847-a053-b72c9f06e3b1" (UID: "ee5fe32b-eefd-4847-a053-b72c9f06e3b1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.893286 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee5fe32b-eefd-4847-a053-b72c9f06e3b1" (UID: "ee5fe32b-eefd-4847-a053-b72c9f06e3b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.916692 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-config-data" (OuterVolumeSpecName: "config-data") pod "ee5fe32b-eefd-4847-a053-b72c9f06e3b1" (UID: "ee5fe32b-eefd-4847-a053-b72c9f06e3b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.971239 4812 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.971281 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.971293 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jck2v\" (UniqueName: \"kubernetes.io/projected/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-kube-api-access-jck2v\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.971306 4812 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.971319 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:24 crc kubenswrapper[4812]: I1125 17:05:24.971329 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee5fe32b-eefd-4847-a053-b72c9f06e3b1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.500299 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xz5j9" event={"ID":"ee5fe32b-eefd-4847-a053-b72c9f06e3b1","Type":"ContainerDied","Data":"42d9bde1b2e2efbb269a7fab47574a64374b896ab8a00f6da56556c9f1248643"} Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.500824 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42d9bde1b2e2efbb269a7fab47574a64374b896ab8a00f6da56556c9f1248643" Nov 25 17:05:25 crc kubenswrapper[4812]: 
I1125 17:05:25.500922 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-xz5j9" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.847337 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:25 crc kubenswrapper[4812]: E1125 17:05:25.850107 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850141 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api" Nov 25 17:05:25 crc kubenswrapper[4812]: E1125 17:05:25.850157 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerName="dnsmasq-dns" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850163 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerName="dnsmasq-dns" Nov 25 17:05:25 crc kubenswrapper[4812]: E1125 17:05:25.850179 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" containerName="cinder-db-sync" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850185 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" containerName="cinder-db-sync" Nov 25 17:05:25 crc kubenswrapper[4812]: E1125 17:05:25.850195 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api-log" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850202 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api-log" Nov 25 17:05:25 crc kubenswrapper[4812]: E1125 17:05:25.850213 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerName="init" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850219 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerName="init" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850386 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" containerName="cinder-db-sync" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850404 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850412 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1ed42ec-dde8-4eae-9618-252764bca23d" containerName="barbican-api-log" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.850424 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="6982125a-a01a-4eed-a7ce-e335ef73e14d" containerName="dnsmasq-dns" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.851595 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.861032 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-55tn6"] Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.862571 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.864519 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.864871 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.865328 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.865780 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-h9qch" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.874853 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.897332 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-55tn6"] Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.991667 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.991759 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmxrp\" (UniqueName: \"kubernetes.io/projected/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-kube-api-access-vmxrp\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.991796 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.991931 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.991983 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-scripts\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.992004 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.992103 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.992128 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.992162 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-config\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.992234 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjvvl\" (UniqueName: \"kubernetes.io/projected/75972e9f-7329-4346-b291-05533c8a926a-kube-api-access-fjvvl\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:25 crc kubenswrapper[4812]: I1125 17:05:25.992356 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093412 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093459 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093491 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-config\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093545 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjvvl\" (UniqueName: \"kubernetes.io/projected/75972e9f-7329-4346-b291-05533c8a926a-kube-api-access-fjvvl\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093583 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093633 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093650 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmxrp\" (UniqueName: \"kubernetes.io/projected/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-kube-api-access-vmxrp\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093673 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093693 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093712 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-scripts\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093728 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.093976 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.094639 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.095568 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " 
pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.097598 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-config\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.098114 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.108173 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-scripts\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.109801 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.113933 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.115515 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.116440 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.118488 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.120273 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmxrp\" (UniqueName: \"kubernetes.io/projected/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-kube-api-access-vmxrp\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.127822 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjvvl\" (UniqueName: \"kubernetes.io/projected/75972e9f-7329-4346-b291-05533c8a926a-kube-api-access-fjvvl\") pod \"dnsmasq-dns-6d97fcdd8f-55tn6\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.133269 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.135799 4812 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.180296 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.189798 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196062 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67645c97-088f-4e92-a739-3aaae5c02223-logs\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196163 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq84z\" (UniqueName: \"kubernetes.io/projected/67645c97-088f-4e92-a739-3aaae5c02223-kube-api-access-zq84z\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196216 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196266 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/67645c97-088f-4e92-a739-3aaae5c02223-etc-machine-id\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196297 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-scripts\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196323 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data-custom\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.196352 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301506 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/67645c97-088f-4e92-a739-3aaae5c02223-etc-machine-id\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301589 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-scripts\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301619 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data-custom\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301648 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301752 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67645c97-088f-4e92-a739-3aaae5c02223-logs\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301818 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq84z\" (UniqueName: \"kubernetes.io/projected/67645c97-088f-4e92-a739-3aaae5c02223-kube-api-access-zq84z\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.301870 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.307863 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/67645c97-088f-4e92-a739-3aaae5c02223-etc-machine-id\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.310340 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67645c97-088f-4e92-a739-3aaae5c02223-logs\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.315645 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.315854 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.316352 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-scripts\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.316605 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data-custom\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.341243 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq84z\" (UniqueName: \"kubernetes.io/projected/67645c97-088f-4e92-a739-3aaae5c02223-kube-api-access-zq84z\") pod \"cinder-api-0\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.363876 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.704691 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-55tn6"] Nov 25 17:05:26 crc kubenswrapper[4812]: W1125 17:05:26.713200 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75972e9f_7329_4346_b291_05533c8a926a.slice/crio-6769a4849d582c61bd9fef59a1af825939d678e16be9d7f98a4388f55a8d958c WatchSource:0}: Error finding container 6769a4849d582c61bd9fef59a1af825939d678e16be9d7f98a4388f55a8d958c: Status 404 returned error can't find the container with id 6769a4849d582c61bd9fef59a1af825939d678e16be9d7f98a4388f55a8d958c Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.714675 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:26 crc kubenswrapper[4812]: I1125 17:05:26.880105 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:26 crc kubenswrapper[4812]: W1125 17:05:26.882155 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67645c97_088f_4e92_a739_3aaae5c02223.slice/crio-58077e05d9d9791431247cca23a93be7cedb56318b1abbf88a6c36ca118c39a1 WatchSource:0}: Error finding container 58077e05d9d9791431247cca23a93be7cedb56318b1abbf88a6c36ca118c39a1: Status 404 returned error can't find the container with id 58077e05d9d9791431247cca23a93be7cedb56318b1abbf88a6c36ca118c39a1 Nov 25 17:05:27 crc kubenswrapper[4812]: I1125 17:05:27.541420 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"67645c97-088f-4e92-a739-3aaae5c02223","Type":"ContainerStarted","Data":"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393"} Nov 25 17:05:27 crc kubenswrapper[4812]: I1125 17:05:27.541808 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"67645c97-088f-4e92-a739-3aaae5c02223","Type":"ContainerStarted","Data":"58077e05d9d9791431247cca23a93be7cedb56318b1abbf88a6c36ca118c39a1"} Nov 25 17:05:27 crc kubenswrapper[4812]: I1125 17:05:27.543002 4812 generic.go:334] "Generic (PLEG): container finished" podID="75972e9f-7329-4346-b291-05533c8a926a" containerID="f8cfea7e90d42777c0a3c20bc2730ed7edd94a5c45e3615d2ee25735785cb7dc" exitCode=0 Nov 25 17:05:27 crc kubenswrapper[4812]: I1125 17:05:27.543067 4812 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" event={"ID":"75972e9f-7329-4346-b291-05533c8a926a","Type":"ContainerDied","Data":"f8cfea7e90d42777c0a3c20bc2730ed7edd94a5c45e3615d2ee25735785cb7dc"} Nov 25 17:05:27 crc kubenswrapper[4812]: I1125 17:05:27.543095 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" event={"ID":"75972e9f-7329-4346-b291-05533c8a926a","Type":"ContainerStarted","Data":"6769a4849d582c61bd9fef59a1af825939d678e16be9d7f98a4388f55a8d958c"} Nov 25 17:05:27 crc kubenswrapper[4812]: I1125 17:05:27.545701 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9186fd5-246f-4bb3-b3b8-926d6d66ed25","Type":"ContainerStarted","Data":"4f0a2cdb83515c3163d22adb2d1ac76d7056b94ca36805608dbdc919370ed73d"} Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.099654 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.580207 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9186fd5-246f-4bb3-b3b8-926d6d66ed25","Type":"ContainerStarted","Data":"7d1a098a6b47e744784d9e942af465edc316f42eef7e52d2632632bdf6008bc8"} Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.593089 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"67645c97-088f-4e92-a739-3aaae5c02223","Type":"ContainerStarted","Data":"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4"} Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.593258 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api-log" containerID="cri-o://54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393" gracePeriod=30 Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.593560 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.593655 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api" containerID="cri-o://8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4" gracePeriod=30 Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.614428 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" event={"ID":"75972e9f-7329-4346-b291-05533c8a926a","Type":"ContainerStarted","Data":"4eb57618a7cddfb38b8cade847a6ed872d79c39bed36ed8694d9559a2bde3b39"} Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.614721 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.653845 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.6538210700000002 podStartE2EDuration="2.65382107s" podCreationTimestamp="2025-11-25 17:05:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:28.620428766 +0000 UTC m=+1103.460570861" watchObservedRunningTime="2025-11-25 17:05:28.65382107 +0000 UTC m=+1103.493963165" Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.660750 
4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" podStartSLOduration=3.660730199 podStartE2EDuration="3.660730199s" podCreationTimestamp="2025-11-25 17:05:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:28.648731711 +0000 UTC m=+1103.488873816" watchObservedRunningTime="2025-11-25 17:05:28.660730199 +0000 UTC m=+1103.500872294" Nov 25 17:05:28 crc kubenswrapper[4812]: I1125 17:05:28.959331 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-995749db8-8znhz" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.083396 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-995749db8-8znhz" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.247752 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377004 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data-custom\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377099 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zq84z\" (UniqueName: \"kubernetes.io/projected/67645c97-088f-4e92-a739-3aaae5c02223-kube-api-access-zq84z\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377152 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-scripts\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377216 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377249 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67645c97-088f-4e92-a739-3aaae5c02223-logs\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377285 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/67645c97-088f-4e92-a739-3aaae5c02223-etc-machine-id\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.377327 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-combined-ca-bundle\") pod \"67645c97-088f-4e92-a739-3aaae5c02223\" (UID: \"67645c97-088f-4e92-a739-3aaae5c02223\") " Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 
17:05:29.379446 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/67645c97-088f-4e92-a739-3aaae5c02223-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.399727 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67645c97-088f-4e92-a739-3aaae5c02223-logs" (OuterVolumeSpecName: "logs") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.415694 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67645c97-088f-4e92-a739-3aaae5c02223-kube-api-access-zq84z" (OuterVolumeSpecName: "kube-api-access-zq84z") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "kube-api-access-zq84z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.419719 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-scripts" (OuterVolumeSpecName: "scripts") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.418721 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.433707 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.446434 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data" (OuterVolumeSpecName: "config-data") pod "67645c97-088f-4e92-a739-3aaae5c02223" (UID: "67645c97-088f-4e92-a739-3aaae5c02223"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479407 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479445 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67645c97-088f-4e92-a739-3aaae5c02223-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479457 4812 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/67645c97-088f-4e92-a739-3aaae5c02223-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479472 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479484 4812 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479496 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zq84z\" (UniqueName: \"kubernetes.io/projected/67645c97-088f-4e92-a739-3aaae5c02223-kube-api-access-zq84z\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.479507 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67645c97-088f-4e92-a739-3aaae5c02223-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.616670 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-849d784859-xzm4f" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.624257 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9186fd5-246f-4bb3-b3b8-926d6d66ed25","Type":"ContainerStarted","Data":"eedc0473e6463e813a9e85f807e7a5e58cd743e9f12503905ea316bd03e0b40d"} Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.627287 4812 generic.go:334] "Generic (PLEG): container finished" podID="67645c97-088f-4e92-a739-3aaae5c02223" containerID="8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4" exitCode=0 Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.627316 4812 generic.go:334] "Generic (PLEG): container finished" podID="67645c97-088f-4e92-a739-3aaae5c02223" containerID="54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393" exitCode=143 Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.627929 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.632186 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"67645c97-088f-4e92-a739-3aaae5c02223","Type":"ContainerDied","Data":"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4"} Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.632220 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"67645c97-088f-4e92-a739-3aaae5c02223","Type":"ContainerDied","Data":"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393"} Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.632232 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"67645c97-088f-4e92-a739-3aaae5c02223","Type":"ContainerDied","Data":"58077e05d9d9791431247cca23a93be7cedb56318b1abbf88a6c36ca118c39a1"} Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.632246 4812 scope.go:117] "RemoveContainer" containerID="8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.672048 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.9260042349999997 podStartE2EDuration="4.672028824s" podCreationTimestamp="2025-11-25 17:05:25 +0000 UTC" firstStartedPulling="2025-11-25 17:05:26.720412274 +0000 UTC m=+1101.560554369" lastFinishedPulling="2025-11-25 17:05:27.466436863 +0000 UTC m=+1102.306578958" observedRunningTime="2025-11-25 17:05:29.669191126 +0000 UTC m=+1104.509333241" watchObservedRunningTime="2025-11-25 17:05:29.672028824 +0000 UTC m=+1104.512170919" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.674042 4812 scope.go:117] "RemoveContainer" containerID="54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.706025 4812 scope.go:117] "RemoveContainer" containerID="8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4" Nov 25 17:05:29 crc kubenswrapper[4812]: E1125 17:05:29.707362 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4\": container with ID starting with 8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4 not found: ID does not exist" containerID="8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.707393 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4"} err="failed to get container status \"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4\": rpc error: code = NotFound desc = could not find container \"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4\": container with ID starting with 8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4 not found: ID does not exist" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.707417 4812 scope.go:117] "RemoveContainer" containerID="54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.708589 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:29 crc kubenswrapper[4812]: E1125 17:05:29.712420 4812 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393\": container with ID starting with 54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393 not found: ID does not exist" containerID="54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.712474 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393"} err="failed to get container status \"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393\": rpc error: code = NotFound desc = could not find container \"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393\": container with ID starting with 54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393 not found: ID does not exist" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.713927 4812 scope.go:117] "RemoveContainer" containerID="8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.714801 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4"} err="failed to get container status \"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4\": rpc error: code = NotFound desc = could not find container \"8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4\": container with ID starting with 8252c6a0ee4b59715df320268de65d5a2beb9b7f86fbd375c9f63d3c78b0bad4 not found: ID does not exist" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.714851 4812 scope.go:117] "RemoveContainer" containerID="54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.715754 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393"} err="failed to get container status \"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393\": rpc error: code = NotFound desc = could not find container \"54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393\": container with ID starting with 54e58c9f9593a48db448e382962542109a1ba00fa703779ccbcafa558b225393 not found: ID does not exist" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.740159 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.756246 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:29 crc kubenswrapper[4812]: E1125 17:05:29.756888 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api-log" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.757160 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api-log" Nov 25 17:05:29 crc kubenswrapper[4812]: E1125 17:05:29.757316 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.757331 4812 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.757860 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.757892 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="67645c97-088f-4e92-a739-3aaae5c02223" containerName="cinder-api-log" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.760720 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.765906 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.766109 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.766228 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.766288 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.842030 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67645c97-088f-4e92-a739-3aaae5c02223" path="/var/lib/kubelet/pods/67645c97-088f-4e92-a739-3aaae5c02223/volumes" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.886910 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e789a7a-c20d-46f6-ad46-76c06658fb28-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.886965 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmmd4\" (UniqueName: \"kubernetes.io/projected/2e789a7a-c20d-46f6-ad46-76c06658fb28-kube-api-access-qmmd4\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887026 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e789a7a-c20d-46f6-ad46-76c06658fb28-logs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887067 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-config-data-custom\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887112 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887165 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-config-data\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887243 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-scripts\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887327 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.887355 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988698 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988747 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988792 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e789a7a-c20d-46f6-ad46-76c06658fb28-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988808 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmmd4\" (UniqueName: \"kubernetes.io/projected/2e789a7a-c20d-46f6-ad46-76c06658fb28-kube-api-access-qmmd4\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988841 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e789a7a-c20d-46f6-ad46-76c06658fb28-logs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988868 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-config-data-custom\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 
crc kubenswrapper[4812]: I1125 17:05:29.988885 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988914 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-config-data\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.988952 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-scripts\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.989906 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e789a7a-c20d-46f6-ad46-76c06658fb28-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.990191 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e789a7a-c20d-46f6-ad46-76c06658fb28-logs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.992428 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-scripts\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.992849 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-config-data-custom\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.993074 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.993251 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.993755 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:29 crc kubenswrapper[4812]: I1125 17:05:29.995143 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e789a7a-c20d-46f6-ad46-76c06658fb28-config-data\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:30 crc kubenswrapper[4812]: I1125 17:05:30.009164 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmmd4\" (UniqueName: \"kubernetes.io/projected/2e789a7a-c20d-46f6-ad46-76c06658fb28-kube-api-access-qmmd4\") pod \"cinder-api-0\" (UID: \"2e789a7a-c20d-46f6-ad46-76c06658fb28\") " pod="openstack/cinder-api-0" Nov 25 17:05:30 crc kubenswrapper[4812]: I1125 17:05:30.085301 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 17:05:30 crc kubenswrapper[4812]: I1125 17:05:30.517399 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 17:05:30 crc kubenswrapper[4812]: W1125 17:05:30.521838 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e789a7a_c20d_46f6_ad46_76c06658fb28.slice/crio-14854b2c4c9ee6f693cca6375c3b16ce520bfbae6a1871cbae0b53f7fb0a9f2a WatchSource:0}: Error finding container 14854b2c4c9ee6f693cca6375c3b16ce520bfbae6a1871cbae0b53f7fb0a9f2a: Status 404 returned error can't find the container with id 14854b2c4c9ee6f693cca6375c3b16ce520bfbae6a1871cbae0b53f7fb0a9f2a Nov 25 17:05:30 crc kubenswrapper[4812]: I1125 17:05:30.643738 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2e789a7a-c20d-46f6-ad46-76c06658fb28","Type":"ContainerStarted","Data":"14854b2c4c9ee6f693cca6375c3b16ce520bfbae6a1871cbae0b53f7fb0a9f2a"} Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.183289 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.655207 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2e789a7a-c20d-46f6-ad46-76c06658fb28","Type":"ContainerStarted","Data":"d939f2ce39ad06b9268522f45a6d05a41f1a52960eb17b748aa4a24b4b0bf517"} Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.937438 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.941213 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.946777 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-hb42k" Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.946863 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.947070 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 17:05:31 crc kubenswrapper[4812]: I1125 17:05:31.951144 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.039922 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2ns4\" (UniqueName: \"kubernetes.io/projected/528b2f5b-a19a-48e1-a793-f66848d935fc-kube-api-access-g2ns4\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.039979 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/528b2f5b-a19a-48e1-a793-f66848d935fc-openstack-config-secret\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.040169 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528b2f5b-a19a-48e1-a793-f66848d935fc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.040272 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/528b2f5b-a19a-48e1-a793-f66848d935fc-openstack-config\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.141963 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528b2f5b-a19a-48e1-a793-f66848d935fc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.142048 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/528b2f5b-a19a-48e1-a793-f66848d935fc-openstack-config\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.142141 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2ns4\" (UniqueName: \"kubernetes.io/projected/528b2f5b-a19a-48e1-a793-f66848d935fc-kube-api-access-g2ns4\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.142159 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/528b2f5b-a19a-48e1-a793-f66848d935fc-openstack-config-secret\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.143145 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/528b2f5b-a19a-48e1-a793-f66848d935fc-openstack-config\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.148470 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/528b2f5b-a19a-48e1-a793-f66848d935fc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.156106 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/528b2f5b-a19a-48e1-a793-f66848d935fc-openstack-config-secret\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.163810 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2ns4\" (UniqueName: \"kubernetes.io/projected/528b2f5b-a19a-48e1-a793-f66848d935fc-kube-api-access-g2ns4\") pod \"openstackclient\" (UID: \"528b2f5b-a19a-48e1-a793-f66848d935fc\") " pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.273062 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.665421 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2e789a7a-c20d-46f6-ad46-76c06658fb28","Type":"ContainerStarted","Data":"8e28ed2dfa40a9268363ab2aae788fe85d3de59aaa130dddd12ade2f8ee88fdb"} Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.665829 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.695627 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.69559968 podStartE2EDuration="3.69559968s" podCreationTimestamp="2025-11-25 17:05:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:32.68818021 +0000 UTC m=+1107.528322295" watchObservedRunningTime="2025-11-25 17:05:32.69559968 +0000 UTC m=+1107.535741775" Nov 25 17:05:32 crc kubenswrapper[4812]: I1125 17:05:32.722151 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 17:05:33 crc kubenswrapper[4812]: I1125 17:05:33.677309 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"528b2f5b-a19a-48e1-a793-f66848d935fc","Type":"ContainerStarted","Data":"04c44db2283ed23928364c766f84b61d5147853fe30ae82f082ef6f99c53ca56"} Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.052147 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.053373 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-central-agent" containerID="cri-o://80dfeb84ca827cd19f2bdee18aecb4ad1b32c4039d1200d483d4478467326750" gracePeriod=30 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.053418 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-notification-agent" containerID="cri-o://59b3568160a9648030cb271bf431816764b7a5f8ebbff9c08a6dadcbad303a3c" gracePeriod=30 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.053431 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="sg-core" containerID="cri-o://406148fd979e443955257808ae32b0c8fb09c037f1077e0f4f8e629c8e6ae834" gracePeriod=30 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.053411 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="proxy-httpd" containerID="cri-o://410f288b8d4db28b8d6f4dafb693da59d1ef5c8429f207007b9ceded74b4dafe" gracePeriod=30 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.057838 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.192694 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.261769 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-6bb684768f-q79bp"] Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.262000 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="dnsmasq-dns" containerID="cri-o://55791fad9db2664f7bac5119aeda9388ffda8c6a4b4e792e24a6861ec84d00d3" gracePeriod=10 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.531662 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.606757 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.709922 4812 generic.go:334] "Generic (PLEG): container finished" podID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerID="410f288b8d4db28b8d6f4dafb693da59d1ef5c8429f207007b9ceded74b4dafe" exitCode=0 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.709959 4812 generic.go:334] "Generic (PLEG): container finished" podID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerID="406148fd979e443955257808ae32b0c8fb09c037f1077e0f4f8e629c8e6ae834" exitCode=2 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.709971 4812 generic.go:334] "Generic (PLEG): container finished" podID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerID="80dfeb84ca827cd19f2bdee18aecb4ad1b32c4039d1200d483d4478467326750" exitCode=0 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.709963 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerDied","Data":"410f288b8d4db28b8d6f4dafb693da59d1ef5c8429f207007b9ceded74b4dafe"} Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.710007 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerDied","Data":"406148fd979e443955257808ae32b0c8fb09c037f1077e0f4f8e629c8e6ae834"} Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.710037 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerDied","Data":"80dfeb84ca827cd19f2bdee18aecb4ad1b32c4039d1200d483d4478467326750"} Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.711790 4812 generic.go:334] "Generic (PLEG): container finished" podID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerID="55791fad9db2664f7bac5119aeda9388ffda8c6a4b4e792e24a6861ec84d00d3" exitCode=0 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.711910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" event={"ID":"6c9583db-6c4d-4ab8-b076-5f01d3f40f30","Type":"ContainerDied","Data":"55791fad9db2664f7bac5119aeda9388ffda8c6a4b4e792e24a6861ec84d00d3"} Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.712044 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="cinder-scheduler" containerID="cri-o://7d1a098a6b47e744784d9e942af465edc316f42eef7e52d2632632bdf6008bc8" gracePeriod=30 Nov 25 17:05:36 crc kubenswrapper[4812]: I1125 17:05:36.712252 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="probe" 
containerID="cri-o://eedc0473e6463e813a9e85f807e7a5e58cd743e9f12503905ea316bd03e0b40d" gracePeriod=30 Nov 25 17:05:37 crc kubenswrapper[4812]: I1125 17:05:37.730371 4812 generic.go:334] "Generic (PLEG): container finished" podID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerID="eedc0473e6463e813a9e85f807e7a5e58cd743e9f12503905ea316bd03e0b40d" exitCode=0 Nov 25 17:05:37 crc kubenswrapper[4812]: I1125 17:05:37.730460 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9186fd5-246f-4bb3-b3b8-926d6d66ed25","Type":"ContainerDied","Data":"eedc0473e6463e813a9e85f807e7a5e58cd743e9f12503905ea316bd03e0b40d"} Nov 25 17:05:39 crc kubenswrapper[4812]: I1125 17:05:39.279829 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:39 crc kubenswrapper[4812]: I1125 17:05:39.753210 4812 generic.go:334] "Generic (PLEG): container finished" podID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerID="7d1a098a6b47e744784d9e942af465edc316f42eef7e52d2632632bdf6008bc8" exitCode=0 Nov 25 17:05:39 crc kubenswrapper[4812]: I1125 17:05:39.753250 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9186fd5-246f-4bb3-b3b8-926d6d66ed25","Type":"ContainerDied","Data":"7d1a098a6b47e744784d9e942af465edc316f42eef7e52d2632632bdf6008bc8"} Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.265955 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-2tnh5"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.273721 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.302049 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2tnh5"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.360510 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2c67-account-create-2dzrx"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.362263 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.365692 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.374330 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2c67-account-create-2dzrx"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.409586 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/941bf985-0c3b-4972-b4fe-182f891de32f-operator-scripts\") pod \"nova-api-db-create-2tnh5\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.409701 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54278\" (UniqueName: \"kubernetes.io/projected/941bf985-0c3b-4972-b4fe-182f891de32f-kube-api-access-54278\") pod \"nova-api-db-create-2tnh5\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.462043 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-pgm8z"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.464893 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.471934 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pgm8z"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.511443 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54278\" (UniqueName: \"kubernetes.io/projected/941bf985-0c3b-4972-b4fe-182f891de32f-kube-api-access-54278\") pod \"nova-api-db-create-2tnh5\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.511515 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hkfd\" (UniqueName: \"kubernetes.io/projected/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-kube-api-access-8hkfd\") pod \"nova-api-2c67-account-create-2dzrx\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.511602 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-operator-scripts\") pod \"nova-api-2c67-account-create-2dzrx\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.511691 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/941bf985-0c3b-4972-b4fe-182f891de32f-operator-scripts\") pod \"nova-api-db-create-2tnh5\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.512460 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/941bf985-0c3b-4972-b4fe-182f891de32f-operator-scripts\") pod \"nova-api-db-create-2tnh5\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.557749 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54278\" (UniqueName: \"kubernetes.io/projected/941bf985-0c3b-4972-b4fe-182f891de32f-kube-api-access-54278\") pod \"nova-api-db-create-2tnh5\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.575360 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-8jrgd"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.576912 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.587125 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8jrgd"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.603945 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-66bc-account-create-9fxvs"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.605188 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.608229 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.612763 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wbxs\" (UniqueName: \"kubernetes.io/projected/64f71340-850a-4cf0-880a-5760dc7e1c4f-kube-api-access-4wbxs\") pod \"nova-cell0-db-create-pgm8z\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.612844 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-operator-scripts\") pod \"nova-api-2c67-account-create-2dzrx\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.612923 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64f71340-850a-4cf0-880a-5760dc7e1c4f-operator-scripts\") pod \"nova-cell0-db-create-pgm8z\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.613029 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hkfd\" (UniqueName: \"kubernetes.io/projected/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-kube-api-access-8hkfd\") pod \"nova-api-2c67-account-create-2dzrx\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.613443 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-66bc-account-create-9fxvs"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.614109 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-operator-scripts\") pod \"nova-api-2c67-account-create-2dzrx\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.623249 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.650105 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hkfd\" (UniqueName: \"kubernetes.io/projected/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-kube-api-access-8hkfd\") pod \"nova-api-2c67-account-create-2dzrx\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.702981 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.715098 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-operator-scripts\") pod \"nova-cell1-db-create-8jrgd\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.715172 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4d2ad77-439d-4039-98b1-f26eadcc542e-operator-scripts\") pod \"nova-cell0-66bc-account-create-9fxvs\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.715422 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wbxs\" (UniqueName: \"kubernetes.io/projected/64f71340-850a-4cf0-880a-5760dc7e1c4f-kube-api-access-4wbxs\") pod \"nova-cell0-db-create-pgm8z\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.715546 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hx28\" (UniqueName: \"kubernetes.io/projected/d4d2ad77-439d-4039-98b1-f26eadcc542e-kube-api-access-8hx28\") pod \"nova-cell0-66bc-account-create-9fxvs\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.715663 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz9bc\" (UniqueName: \"kubernetes.io/projected/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-kube-api-access-gz9bc\") pod \"nova-cell1-db-create-8jrgd\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.715708 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64f71340-850a-4cf0-880a-5760dc7e1c4f-operator-scripts\") pod \"nova-cell0-db-create-pgm8z\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " 
pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.716758 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64f71340-850a-4cf0-880a-5760dc7e1c4f-operator-scripts\") pod \"nova-cell0-db-create-pgm8z\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.752572 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wbxs\" (UniqueName: \"kubernetes.io/projected/64f71340-850a-4cf0-880a-5760dc7e1c4f-kube-api-access-4wbxs\") pod \"nova-cell0-db-create-pgm8z\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.766872 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cff9-account-create-772bb"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.768259 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.770672 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.780973 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.785771 4812 generic.go:334] "Generic (PLEG): container finished" podID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerID="59b3568160a9648030cb271bf431816764b7a5f8ebbff9c08a6dadcbad303a3c" exitCode=0 Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.785822 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerDied","Data":"59b3568160a9648030cb271bf431816764b7a5f8ebbff9c08a6dadcbad303a3c"} Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.793718 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cff9-account-create-772bb"] Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.817351 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-operator-scripts\") pod \"nova-cell1-db-create-8jrgd\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.817412 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4d2ad77-439d-4039-98b1-f26eadcc542e-operator-scripts\") pod \"nova-cell0-66bc-account-create-9fxvs\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.817473 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hx28\" (UniqueName: \"kubernetes.io/projected/d4d2ad77-439d-4039-98b1-f26eadcc542e-kube-api-access-8hx28\") pod \"nova-cell0-66bc-account-create-9fxvs\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.817547 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz9bc\" (UniqueName: \"kubernetes.io/projected/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-kube-api-access-gz9bc\") pod \"nova-cell1-db-create-8jrgd\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.820288 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4d2ad77-439d-4039-98b1-f26eadcc542e-operator-scripts\") pod \"nova-cell0-66bc-account-create-9fxvs\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.822364 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-operator-scripts\") pod \"nova-cell1-db-create-8jrgd\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.837001 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hx28\" (UniqueName: \"kubernetes.io/projected/d4d2ad77-439d-4039-98b1-f26eadcc542e-kube-api-access-8hx28\") pod \"nova-cell0-66bc-account-create-9fxvs\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.838495 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz9bc\" (UniqueName: \"kubernetes.io/projected/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-kube-api-access-gz9bc\") pod \"nova-cell1-db-create-8jrgd\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.919150 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ba390cc-ed29-4af9-b3b5-582f5c3de736-operator-scripts\") pod \"nova-cell1-cff9-account-create-772bb\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.919281 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lklkn\" (UniqueName: \"kubernetes.io/projected/6ba390cc-ed29-4af9-b3b5-582f5c3de736-kube-api-access-lklkn\") pod \"nova-cell1-cff9-account-create-772bb\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.940945 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:40 crc kubenswrapper[4812]: I1125 17:05:40.985922 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.021277 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ba390cc-ed29-4af9-b3b5-582f5c3de736-operator-scripts\") pod \"nova-cell1-cff9-account-create-772bb\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.021376 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lklkn\" (UniqueName: \"kubernetes.io/projected/6ba390cc-ed29-4af9-b3b5-582f5c3de736-kube-api-access-lklkn\") pod \"nova-cell1-cff9-account-create-772bb\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.022286 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ba390cc-ed29-4af9-b3b5-582f5c3de736-operator-scripts\") pod \"nova-cell1-cff9-account-create-772bb\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.039662 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lklkn\" (UniqueName: \"kubernetes.io/projected/6ba390cc-ed29-4af9-b3b5-582f5c3de736-kube-api-access-lklkn\") pod \"nova-cell1-cff9-account-create-772bb\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.106014 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.106660 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="bf71da98-d4be-4c2f-a900-118282c5fa5f" containerName="kube-state-metrics" containerID="cri-o://d4dd23b8b48fdb9581e3623ad5103b7fef3d71ddf8eeeab3214504b8b1ae57e9" gracePeriod=30 Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.112370 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.793448 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7449f79865-5fw9f" Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.796911 4812 generic.go:334] "Generic (PLEG): container finished" podID="bf71da98-d4be-4c2f-a900-118282c5fa5f" containerID="d4dd23b8b48fdb9581e3623ad5103b7fef3d71ddf8eeeab3214504b8b1ae57e9" exitCode=2 Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.796958 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bf71da98-d4be-4c2f-a900-118282c5fa5f","Type":"ContainerDied","Data":"d4dd23b8b48fdb9581e3623ad5103b7fef3d71ddf8eeeab3214504b8b1ae57e9"} Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.859110 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6c8d9b5d8d-dhthn"] Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.859374 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6c8d9b5d8d-dhthn" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-api" containerID="cri-o://a3df4db6ec8e762e8d7d2749d9960042f5364cd1bd5885a4b434c0d687223402" gracePeriod=30 Nov 25 17:05:41 crc kubenswrapper[4812]: I1125 17:05:41.859719 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6c8d9b5d8d-dhthn" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-httpd" containerID="cri-o://abda29774ee0b1e64e94d61d01013714faa29d1bf9119b37ef10ca17ac6386ba" gracePeriod=30 Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.306988 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.609388 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.757573 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-sb\") pod \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.757680 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-nb\") pod \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.757831 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-config\") pod \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.757865 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-dns-svc\") pod \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.758483 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9cp9\" (UniqueName: \"kubernetes.io/projected/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-kube-api-access-n9cp9\") pod \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\" (UID: \"6c9583db-6c4d-4ab8-b076-5f01d3f40f30\") " Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.763921 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-kube-api-access-n9cp9" (OuterVolumeSpecName: "kube-api-access-n9cp9") pod "6c9583db-6c4d-4ab8-b076-5f01d3f40f30" (UID: "6c9583db-6c4d-4ab8-b076-5f01d3f40f30"). InnerVolumeSpecName "kube-api-access-n9cp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.833390 4812 generic.go:334] "Generic (PLEG): container finished" podID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerID="abda29774ee0b1e64e94d61d01013714faa29d1bf9119b37ef10ca17ac6386ba" exitCode=0 Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.833759 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c8d9b5d8d-dhthn" event={"ID":"aff51d33-8e62-4c99-bc89-c6e53270b60c","Type":"ContainerDied","Data":"abda29774ee0b1e64e94d61d01013714faa29d1bf9119b37ef10ca17ac6386ba"} Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.838369 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" event={"ID":"6c9583db-6c4d-4ab8-b076-5f01d3f40f30","Type":"ContainerDied","Data":"38c23bd134f029f3d881d7ecbd923dbd4cb208795c44e512c4b77c0a4e8744a5"} Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.838422 4812 scope.go:117] "RemoveContainer" containerID="55791fad9db2664f7bac5119aeda9388ffda8c6a4b4e792e24a6861ec84d00d3" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.838557 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.863470 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9cp9\" (UniqueName: \"kubernetes.io/projected/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-kube-api-access-n9cp9\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.886302 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c9583db-6c4d-4ab8-b076-5f01d3f40f30" (UID: "6c9583db-6c4d-4ab8-b076-5f01d3f40f30"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.912933 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-config" (OuterVolumeSpecName: "config") pod "6c9583db-6c4d-4ab8-b076-5f01d3f40f30" (UID: "6c9583db-6c4d-4ab8-b076-5f01d3f40f30"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.915307 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6c9583db-6c4d-4ab8-b076-5f01d3f40f30" (UID: "6c9583db-6c4d-4ab8-b076-5f01d3f40f30"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.923575 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6c9583db-6c4d-4ab8-b076-5f01d3f40f30" (UID: "6c9583db-6c4d-4ab8-b076-5f01d3f40f30"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.943670 4812 scope.go:117] "RemoveContainer" containerID="68508a893204d94021ab681d245ec2b9e3d72cf27a6d7c2d48d5a05d45600267" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.970635 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.970949 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.970963 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.970975 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c9583db-6c4d-4ab8-b076-5f01d3f40f30-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:42 crc kubenswrapper[4812]: I1125 17:05:42.981228 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072182 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-log-httpd\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072344 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-combined-ca-bundle\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072402 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n6zv\" (UniqueName: \"kubernetes.io/projected/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-kube-api-access-2n6zv\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072468 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-sg-core-conf-yaml\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072500 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-scripts\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072551 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-config-data\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072600 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-run-httpd\") pod \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\" (UID: \"8e906f47-2af7-4fae-8aa2-9bde21c41f5b\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.072876 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.073038 4812 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.073462 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.077769 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-kube-api-access-2n6zv" (OuterVolumeSpecName: "kube-api-access-2n6zv") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). InnerVolumeSpecName "kube-api-access-2n6zv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.085214 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-scripts" (OuterVolumeSpecName: "scripts") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.135941 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.167346 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.179098 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.179139 4812 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.179154 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n6zv\" (UniqueName: \"kubernetes.io/projected/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-kube-api-access-2n6zv\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.179168 4812 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.190739 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-q79bp"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.194664 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.200505 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-q79bp"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.260473 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.279888 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmxrp\" (UniqueName: \"kubernetes.io/projected/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-kube-api-access-vmxrp\") pod \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280010 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-scripts\") pod \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280052 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-etc-machine-id\") pod \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280073 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data-custom\") pod \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280154 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-combined-ca-bundle\") pod \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280215 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data\") pod \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\" (UID: \"d9186fd5-246f-4bb3-b3b8-926d6d66ed25\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280253 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58fb9\" (UniqueName: \"kubernetes.io/projected/bf71da98-d4be-4c2f-a900-118282c5fa5f-kube-api-access-58fb9\") pod \"bf71da98-d4be-4c2f-a900-118282c5fa5f\" (UID: \"bf71da98-d4be-4c2f-a900-118282c5fa5f\") " Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.280688 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.281211 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d9186fd5-246f-4bb3-b3b8-926d6d66ed25" (UID: "d9186fd5-246f-4bb3-b3b8-926d6d66ed25"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.284666 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf71da98-d4be-4c2f-a900-118282c5fa5f-kube-api-access-58fb9" (OuterVolumeSpecName: "kube-api-access-58fb9") pod "bf71da98-d4be-4c2f-a900-118282c5fa5f" (UID: "bf71da98-d4be-4c2f-a900-118282c5fa5f"). InnerVolumeSpecName "kube-api-access-58fb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.284895 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-kube-api-access-vmxrp" (OuterVolumeSpecName: "kube-api-access-vmxrp") pod "d9186fd5-246f-4bb3-b3b8-926d6d66ed25" (UID: "d9186fd5-246f-4bb3-b3b8-926d6d66ed25"). InnerVolumeSpecName "kube-api-access-vmxrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.291691 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-config-data" (OuterVolumeSpecName: "config-data") pod "8e906f47-2af7-4fae-8aa2-9bde21c41f5b" (UID: "8e906f47-2af7-4fae-8aa2-9bde21c41f5b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.291811 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-scripts" (OuterVolumeSpecName: "scripts") pod "d9186fd5-246f-4bb3-b3b8-926d6d66ed25" (UID: "d9186fd5-246f-4bb3-b3b8-926d6d66ed25"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.292236 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d9186fd5-246f-4bb3-b3b8-926d6d66ed25" (UID: "d9186fd5-246f-4bb3-b3b8-926d6d66ed25"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.337036 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9186fd5-246f-4bb3-b3b8-926d6d66ed25" (UID: "d9186fd5-246f-4bb3-b3b8-926d6d66ed25"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.381204 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data" (OuterVolumeSpecName: "config-data") pod "d9186fd5-246f-4bb3-b3b8-926d6d66ed25" (UID: "d9186fd5-246f-4bb3-b3b8-926d6d66ed25"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382213 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382234 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e906f47-2af7-4fae-8aa2-9bde21c41f5b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382242 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382252 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58fb9\" (UniqueName: \"kubernetes.io/projected/bf71da98-d4be-4c2f-a900-118282c5fa5f-kube-api-access-58fb9\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382264 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmxrp\" (UniqueName: \"kubernetes.io/projected/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-kube-api-access-vmxrp\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382272 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382280 4812 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.382288 4812 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9186fd5-246f-4bb3-b3b8-926d6d66ed25-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.627933 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pgm8z"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.650278 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2c67-account-create-2dzrx"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.665904 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cff9-account-create-772bb"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.675838 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-66bc-account-create-9fxvs"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.692738 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2tnh5"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.700136 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8jrgd"] Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.844182 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" path="/var/lib/kubelet/pods/6c9583db-6c4d-4ab8-b076-5f01d3f40f30/volumes" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.854432 4812 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-cell0-db-create-pgm8z" event={"ID":"64f71340-850a-4cf0-880a-5760dc7e1c4f","Type":"ContainerStarted","Data":"ef44624e7ae9e8cd285d9333fe21d50d5d1fefd1f5fe99d126e82a5833f1d720"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.868702 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8e906f47-2af7-4fae-8aa2-9bde21c41f5b","Type":"ContainerDied","Data":"1fcf02c0d6167ad60dd2e9ef9be9ac6b680dea7e1d910f100c1cead99f803ca3"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.868751 4812 scope.go:117] "RemoveContainer" containerID="410f288b8d4db28b8d6f4dafb693da59d1ef5c8429f207007b9ceded74b4dafe" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.868850 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.885841 4812 generic.go:334] "Generic (PLEG): container finished" podID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerID="a3df4db6ec8e762e8d7d2749d9960042f5364cd1bd5885a4b434c0d687223402" exitCode=0 Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.885919 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c8d9b5d8d-dhthn" event={"ID":"aff51d33-8e62-4c99-bc89-c6e53270b60c","Type":"ContainerDied","Data":"a3df4db6ec8e762e8d7d2749d9960042f5364cd1bd5885a4b434c0d687223402"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.902891 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9186fd5-246f-4bb3-b3b8-926d6d66ed25","Type":"ContainerDied","Data":"4f0a2cdb83515c3163d22adb2d1ac76d7056b94ca36805608dbdc919370ed73d"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.903079 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.910145 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"528b2f5b-a19a-48e1-a793-f66848d935fc","Type":"ContainerStarted","Data":"f33f103d9c6ff024665ace7e7fe1421561f3b9c1c08094f911bb94daa701e7a6"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.919418 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cff9-account-create-772bb" event={"ID":"6ba390cc-ed29-4af9-b3b5-582f5c3de736","Type":"ContainerStarted","Data":"5f08d2159fbaf2cae176591862f8f2fc2cb015d93abd97b137e88ac0119235ef"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.923076 4812 scope.go:117] "RemoveContainer" containerID="406148fd979e443955257808ae32b0c8fb09c037f1077e0f4f8e629c8e6ae834" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.924106 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-66bc-account-create-9fxvs" event={"ID":"d4d2ad77-439d-4039-98b1-f26eadcc542e","Type":"ContainerStarted","Data":"591f39bfb523b820b456b12adf53b3adc95bed5ce69597ef5149c29955420b7a"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.930891 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.957705737 podStartE2EDuration="12.930873833s" podCreationTimestamp="2025-11-25 17:05:31 +0000 UTC" firstStartedPulling="2025-11-25 17:05:32.728883728 +0000 UTC m=+1107.569025823" lastFinishedPulling="2025-11-25 17:05:42.702051824 +0000 UTC m=+1117.542193919" observedRunningTime="2025-11-25 17:05:43.924854201 +0000 UTC m=+1118.764996296" watchObservedRunningTime="2025-11-25 17:05:43.930873833 +0000 UTC m=+1118.771015928" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.932518 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8jrgd" event={"ID":"3ffbf750-9ce8-454e-a031-eb09ebaa34a5","Type":"ContainerStarted","Data":"5d49374e68ba18cb3dd618408a975deaf861b04b8daba9b2468d93a4aaf1cd6d"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.949578 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2c67-account-create-2dzrx" event={"ID":"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b","Type":"ContainerStarted","Data":"b132f1437153b701d0b676877f918b592e0316fe0909aefb1a6d82130f4cbe2c"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.955889 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2tnh5" event={"ID":"941bf985-0c3b-4972-b4fe-182f891de32f","Type":"ContainerStarted","Data":"7cdecdc75eb685899b1a43dc0d397d193262e8ffc43709daf13ce9bb38aafcef"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.958680 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bf71da98-d4be-4c2f-a900-118282c5fa5f","Type":"ContainerDied","Data":"fb18ccc0c8b795e8db94d4e1f8b03202c2df48cb10e14229e8cbf3ee5a5b91fb"} Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.958774 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.960584 4812 scope.go:117] "RemoveContainer" containerID="59b3568160a9648030cb271bf431816764b7a5f8ebbff9c08a6dadcbad303a3c" Nov 25 17:05:43 crc kubenswrapper[4812]: I1125 17:05:43.979398 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.002716 4812 scope.go:117] "RemoveContainer" containerID="80dfeb84ca827cd19f2bdee18aecb4ad1b32c4039d1200d483d4478467326750" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.017319 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.035596 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.071924 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.083758 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084455 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="cinder-scheduler" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084473 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="cinder-scheduler" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084487 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="proxy-httpd" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084493 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="proxy-httpd" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084515 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-central-agent" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084521 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-central-agent" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084556 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="sg-core" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084563 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="sg-core" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084596 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="init" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084603 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="init" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084623 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf71da98-d4be-4c2f-a900-118282c5fa5f" containerName="kube-state-metrics" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084631 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf71da98-d4be-4c2f-a900-118282c5fa5f" containerName="kube-state-metrics" 
Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084648 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-notification-agent" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084655 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-notification-agent" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084672 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="dnsmasq-dns" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084678 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="dnsmasq-dns" Nov 25 17:05:44 crc kubenswrapper[4812]: E1125 17:05:44.084686 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="probe" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.084692 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="probe" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085550 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf71da98-d4be-4c2f-a900-118282c5fa5f" containerName="kube-state-metrics" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085581 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-central-agent" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085603 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="ceilometer-notification-agent" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085613 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="probe" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085640 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="dnsmasq-dns" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085654 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" containerName="cinder-scheduler" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085667 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="sg-core" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.085687 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" containerName="proxy-httpd" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.086423 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.089709 4812 scope.go:117] "RemoveContainer" containerID="eedc0473e6463e813a9e85f807e7a5e58cd743e9f12503905ea316bd03e0b40d" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.092583 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.092720 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.094660 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-xmqng" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.108291 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.120602 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.129221 4812 scope.go:117] "RemoveContainer" containerID="7d1a098a6b47e744784d9e942af465edc316f42eef7e52d2632632bdf6008bc8" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.129473 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.133980 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.143497 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.143908 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.144153 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bb684768f-q79bp" podUID="6c9583db-6c4d-4ab8-b076-5f01d3f40f30" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: i/o timeout" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.163581 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.171506 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.173607 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.177041 4812 scope.go:117] "RemoveContainer" containerID="d4dd23b8b48fdb9581e3623ad5103b7fef3d71ddf8eeeab3214504b8b1ae57e9" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.179028 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.179160 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.179495 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.180723 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.191787 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202389 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202479 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202508 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-config-data\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202579 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202616 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-scripts\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202649 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202683 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9hqm\" (UniqueName: \"kubernetes.io/projected/eeb4b7ce-d812-43f8-a575-683af1499cfa-kube-api-access-v9hqm\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202739 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/eeb4b7ce-d812-43f8-a575-683af1499cfa-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202786 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.202819 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw5cd\" (UniqueName: \"kubernetes.io/projected/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-api-access-jw5cd\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.304246 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9bhc\" (UniqueName: \"kubernetes.io/projected/aff51d33-8e62-4c99-bc89-c6e53270b60c-kube-api-access-f9bhc\") pod \"aff51d33-8e62-4c99-bc89-c6e53270b60c\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.304558 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-combined-ca-bundle\") pod \"aff51d33-8e62-4c99-bc89-c6e53270b60c\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.304684 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-ovndb-tls-certs\") pod \"aff51d33-8e62-4c99-bc89-c6e53270b60c\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.304798 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-config\") pod \"aff51d33-8e62-4c99-bc89-c6e53270b60c\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.304975 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-httpd-config\") pod \"aff51d33-8e62-4c99-bc89-c6e53270b60c\" (UID: \"aff51d33-8e62-4c99-bc89-c6e53270b60c\") " Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.305261 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-config-data\") pod 
\"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.305343 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.305423 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-run-httpd\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.308481 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.308620 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-scripts\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.308721 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.308795 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.309160 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9hqm\" (UniqueName: \"kubernetes.io/projected/eeb4b7ce-d812-43f8-a575-683af1499cfa-kube-api-access-v9hqm\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.312684 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "aff51d33-8e62-4c99-bc89-c6e53270b60c" (UID: "aff51d33-8e62-4c99-bc89-c6e53270b60c"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316748 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/eeb4b7ce-d812-43f8-a575-683af1499cfa-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316795 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-scripts\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316812 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316830 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/eeb4b7ce-d812-43f8-a575-683af1499cfa-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316860 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316898 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw5cd\" (UniqueName: \"kubernetes.io/projected/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-api-access-jw5cd\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316930 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wc6w\" (UniqueName: \"kubernetes.io/projected/a19d6900-fbec-4d99-9a22-6574b637ab4c-kube-api-access-7wc6w\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.317005 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.317067 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-config-data\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.317125 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-log-httpd\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.317148 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.317227 4812 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.316277 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aff51d33-8e62-4c99-bc89-c6e53270b60c-kube-api-access-f9bhc" (OuterVolumeSpecName: "kube-api-access-f9bhc") pod "aff51d33-8e62-4c99-bc89-c6e53270b60c" (UID: "aff51d33-8e62-4c99-bc89-c6e53270b60c"). InnerVolumeSpecName "kube-api-access-f9bhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.319570 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.322392 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-config-data\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.323909 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.325719 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.325963 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.327460 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-scripts\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " 
pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.339203 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9hqm\" (UniqueName: \"kubernetes.io/projected/eeb4b7ce-d812-43f8-a575-683af1499cfa-kube-api-access-v9hqm\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.343433 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw5cd\" (UniqueName: \"kubernetes.io/projected/91bfadbe-a98d-49e4-88a9-97be162972a5-kube-api-access-jw5cd\") pod \"kube-state-metrics-0\" (UID: \"91bfadbe-a98d-49e4-88a9-97be162972a5\") " pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.343953 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eeb4b7ce-d812-43f8-a575-683af1499cfa-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"eeb4b7ce-d812-43f8-a575-683af1499cfa\") " pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.413785 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aff51d33-8e62-4c99-bc89-c6e53270b60c" (UID: "aff51d33-8e62-4c99-bc89-c6e53270b60c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.419701 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.420942 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-config-data\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.420996 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-log-httpd\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421029 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421062 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-run-httpd\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421114 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 
17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421168 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-scripts\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421190 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421232 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wc6w\" (UniqueName: \"kubernetes.io/projected/a19d6900-fbec-4d99-9a22-6574b637ab4c-kube-api-access-7wc6w\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421291 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9bhc\" (UniqueName: \"kubernetes.io/projected/aff51d33-8e62-4c99-bc89-c6e53270b60c-kube-api-access-f9bhc\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.421301 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.422311 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-run-httpd\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.422622 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-log-httpd\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.422731 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-config" (OuterVolumeSpecName: "config") pod "aff51d33-8e62-4c99-bc89-c6e53270b60c" (UID: "aff51d33-8e62-4c99-bc89-c6e53270b60c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.426575 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.426977 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.431253 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.432185 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-scripts\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.437470 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wc6w\" (UniqueName: \"kubernetes.io/projected/a19d6900-fbec-4d99-9a22-6574b637ab4c-kube-api-access-7wc6w\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.438654 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-config-data\") pod \"ceilometer-0\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.444959 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "aff51d33-8e62-4c99-bc89-c6e53270b60c" (UID: "aff51d33-8e62-4c99-bc89-c6e53270b60c"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.481037 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.506977 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.523481 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.523515 4812 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/aff51d33-8e62-4c99-bc89-c6e53270b60c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.896189 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 17:05:44 crc kubenswrapper[4812]: W1125 17:05:44.905331 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91bfadbe_a98d_49e4_88a9_97be162972a5.slice/crio-ac1ee0f61c56eadf21dcda14c48621c1c7fee6af903423ec3c4068f701ee6043 WatchSource:0}: Error finding container ac1ee0f61c56eadf21dcda14c48621c1c7fee6af903423ec3c4068f701ee6043: Status 404 returned error can't find the container with id ac1ee0f61c56eadf21dcda14c48621c1c7fee6af903423ec3c4068f701ee6043 Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.970681 4812 generic.go:334] "Generic (PLEG): container finished" podID="d4d2ad77-439d-4039-98b1-f26eadcc542e" containerID="cf259ffa2d6cfff5e626a60c9259f7b88c8610d83737e49f48dc219365ce6512" exitCode=0 Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.970735 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-66bc-account-create-9fxvs" event={"ID":"d4d2ad77-439d-4039-98b1-f26eadcc542e","Type":"ContainerDied","Data":"cf259ffa2d6cfff5e626a60c9259f7b88c8610d83737e49f48dc219365ce6512"} Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.972859 4812 generic.go:334] "Generic (PLEG): container finished" podID="941bf985-0c3b-4972-b4fe-182f891de32f" containerID="8ea359b43672d054928c8e68d05f43fa720edd480d3734062ce481f2c051d289" exitCode=0 Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.972893 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2tnh5" event={"ID":"941bf985-0c3b-4972-b4fe-182f891de32f","Type":"ContainerDied","Data":"8ea359b43672d054928c8e68d05f43fa720edd480d3734062ce481f2c051d289"} Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.980179 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"91bfadbe-a98d-49e4-88a9-97be162972a5","Type":"ContainerStarted","Data":"ac1ee0f61c56eadf21dcda14c48621c1c7fee6af903423ec3c4068f701ee6043"} Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.982085 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6c8d9b5d8d-dhthn" event={"ID":"aff51d33-8e62-4c99-bc89-c6e53270b60c","Type":"ContainerDied","Data":"d72788c9a5e209d04c56729463e5a62c826307372f233553eb06b6639d03bca0"} Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.982123 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6c8d9b5d8d-dhthn" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.982141 4812 scope.go:117] "RemoveContainer" containerID="abda29774ee0b1e64e94d61d01013714faa29d1bf9119b37ef10ca17ac6386ba" Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.994578 4812 generic.go:334] "Generic (PLEG): container finished" podID="3ffbf750-9ce8-454e-a031-eb09ebaa34a5" containerID="c11d340edffb8007ba2bef28b7c0676a9322cee13cd7905a314cf4d9553da4bc" exitCode=0 Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.994694 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8jrgd" event={"ID":"3ffbf750-9ce8-454e-a031-eb09ebaa34a5","Type":"ContainerDied","Data":"c11d340edffb8007ba2bef28b7c0676a9322cee13cd7905a314cf4d9553da4bc"} Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.997424 4812 generic.go:334] "Generic (PLEG): container finished" podID="0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" containerID="ddc9240f16a4c291bc708104e69390f49d6d05f043cd815df2fde700fb198348" exitCode=0 Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.997865 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2c67-account-create-2dzrx" event={"ID":"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b","Type":"ContainerDied","Data":"ddc9240f16a4c291bc708104e69390f49d6d05f043cd815df2fde700fb198348"} Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.999336 4812 generic.go:334] "Generic (PLEG): container finished" podID="6ba390cc-ed29-4af9-b3b5-582f5c3de736" containerID="7ede419f7de410b3b355315b75aa48ca8908060061bd4733ab07a85e39aefab6" exitCode=0 Nov 25 17:05:44 crc kubenswrapper[4812]: I1125 17:05:44.999592 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cff9-account-create-772bb" event={"ID":"6ba390cc-ed29-4af9-b3b5-582f5c3de736","Type":"ContainerDied","Data":"7ede419f7de410b3b355315b75aa48ca8908060061bd4733ab07a85e39aefab6"} Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.001185 4812 generic.go:334] "Generic (PLEG): container finished" podID="64f71340-850a-4cf0-880a-5760dc7e1c4f" containerID="5bba6f465dbc381cff57a71703a8e3ef80f656c6591947820c0797452ab14730" exitCode=0 Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.001202 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pgm8z" event={"ID":"64f71340-850a-4cf0-880a-5760dc7e1c4f","Type":"ContainerDied","Data":"5bba6f465dbc381cff57a71703a8e3ef80f656c6591947820c0797452ab14730"} Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.027395 4812 scope.go:117] "RemoveContainer" containerID="a3df4db6ec8e762e8d7d2749d9960042f5364cd1bd5885a4b434c0d687223402" Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.067587 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:45 crc kubenswrapper[4812]: W1125 17:05:45.072064 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeeb4b7ce_d812_43f8_a575_683af1499cfa.slice/crio-0b5940993d9d0c62e31608d6fd8db4beb3d53e69ea3b0cbc388ab8d5ca2f76df WatchSource:0}: Error finding container 0b5940993d9d0c62e31608d6fd8db4beb3d53e69ea3b0cbc388ab8d5ca2f76df: Status 404 returned error can't find the container with id 0b5940993d9d0c62e31608d6fd8db4beb3d53e69ea3b0cbc388ab8d5ca2f76df Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.088635 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 
25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.198599 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6c8d9b5d8d-dhthn"] Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.217176 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6c8d9b5d8d-dhthn"] Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.844659 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e906f47-2af7-4fae-8aa2-9bde21c41f5b" path="/var/lib/kubelet/pods/8e906f47-2af7-4fae-8aa2-9bde21c41f5b/volumes" Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.845805 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" path="/var/lib/kubelet/pods/aff51d33-8e62-4c99-bc89-c6e53270b60c/volumes" Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.846415 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf71da98-d4be-4c2f-a900-118282c5fa5f" path="/var/lib/kubelet/pods/bf71da98-d4be-4c2f-a900-118282c5fa5f/volumes" Nov 25 17:05:45 crc kubenswrapper[4812]: I1125 17:05:45.847442 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9186fd5-246f-4bb3-b3b8-926d6d66ed25" path="/var/lib/kubelet/pods/d9186fd5-246f-4bb3-b3b8-926d6d66ed25/volumes" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.015510 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"eeb4b7ce-d812-43f8-a575-683af1499cfa","Type":"ContainerStarted","Data":"75c55050ae46b3a754a5f96aeb9ef45a64834fd397b0f46607ee9f391d293762"} Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.015815 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"eeb4b7ce-d812-43f8-a575-683af1499cfa","Type":"ContainerStarted","Data":"0b5940993d9d0c62e31608d6fd8db4beb3d53e69ea3b0cbc388ab8d5ca2f76df"} Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.018351 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerStarted","Data":"ecf80a1d993c72fc1b003327ae06d9881adae76bb823c11909228e138c25cc50"} Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.018407 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerStarted","Data":"12a83b9e9bcef1ec0cb0a7798bbf853bc533a5b14a9ce4c50619969afed92973"} Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.019706 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"91bfadbe-a98d-49e4-88a9-97be162972a5","Type":"ContainerStarted","Data":"9276970c7d082980d12fd08e86ecc74f850d83c8eb74759a48e704f346e2c5cc"} Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.019886 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.041986 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.43925788 podStartE2EDuration="3.04196897s" podCreationTimestamp="2025-11-25 17:05:43 +0000 UTC" firstStartedPulling="2025-11-25 17:05:44.90810431 +0000 UTC m=+1119.748246405" lastFinishedPulling="2025-11-25 17:05:45.5108154 +0000 UTC m=+1120.350957495" observedRunningTime="2025-11-25 17:05:46.039023111 +0000 UTC m=+1120.879165206" 
watchObservedRunningTime="2025-11-25 17:05:46.04196897 +0000 UTC m=+1120.882111065" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.410196 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.584018 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64f71340-850a-4cf0-880a-5760dc7e1c4f-operator-scripts\") pod \"64f71340-850a-4cf0-880a-5760dc7e1c4f\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.584136 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wbxs\" (UniqueName: \"kubernetes.io/projected/64f71340-850a-4cf0-880a-5760dc7e1c4f-kube-api-access-4wbxs\") pod \"64f71340-850a-4cf0-880a-5760dc7e1c4f\" (UID: \"64f71340-850a-4cf0-880a-5760dc7e1c4f\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.586505 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64f71340-850a-4cf0-880a-5760dc7e1c4f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "64f71340-850a-4cf0-880a-5760dc7e1c4f" (UID: "64f71340-850a-4cf0-880a-5760dc7e1c4f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.611445 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64f71340-850a-4cf0-880a-5760dc7e1c4f-kube-api-access-4wbxs" (OuterVolumeSpecName: "kube-api-access-4wbxs") pod "64f71340-850a-4cf0-880a-5760dc7e1c4f" (UID: "64f71340-850a-4cf0-880a-5760dc7e1c4f"). InnerVolumeSpecName "kube-api-access-4wbxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.688081 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wbxs\" (UniqueName: \"kubernetes.io/projected/64f71340-850a-4cf0-880a-5760dc7e1c4f-kube-api-access-4wbxs\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.688450 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/64f71340-850a-4cf0-880a-5760dc7e1c4f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.798397 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.801542 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.822068 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.853108 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.864798 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.891829 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hkfd\" (UniqueName: \"kubernetes.io/projected/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-kube-api-access-8hkfd\") pod \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.892894 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54278\" (UniqueName: \"kubernetes.io/projected/941bf985-0c3b-4972-b4fe-182f891de32f-kube-api-access-54278\") pod \"941bf985-0c3b-4972-b4fe-182f891de32f\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.893000 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/941bf985-0c3b-4972-b4fe-182f891de32f-operator-scripts\") pod \"941bf985-0c3b-4972-b4fe-182f891de32f\" (UID: \"941bf985-0c3b-4972-b4fe-182f891de32f\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.893686 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-operator-scripts\") pod \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\" (UID: \"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.893990 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/941bf985-0c3b-4972-b4fe-182f891de32f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "941bf985-0c3b-4972-b4fe-182f891de32f" (UID: "941bf985-0c3b-4972-b4fe-182f891de32f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.896600 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/941bf985-0c3b-4972-b4fe-182f891de32f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.896841 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" (UID: "0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.899370 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/941bf985-0c3b-4972-b4fe-182f891de32f-kube-api-access-54278" (OuterVolumeSpecName: "kube-api-access-54278") pod "941bf985-0c3b-4972-b4fe-182f891de32f" (UID: "941bf985-0c3b-4972-b4fe-182f891de32f"). InnerVolumeSpecName "kube-api-access-54278". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.903896 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-kube-api-access-8hkfd" (OuterVolumeSpecName: "kube-api-access-8hkfd") pod "0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" (UID: "0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b"). 
InnerVolumeSpecName "kube-api-access-8hkfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.997832 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz9bc\" (UniqueName: \"kubernetes.io/projected/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-kube-api-access-gz9bc\") pod \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.997979 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lklkn\" (UniqueName: \"kubernetes.io/projected/6ba390cc-ed29-4af9-b3b5-582f5c3de736-kube-api-access-lklkn\") pod \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.998029 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hx28\" (UniqueName: \"kubernetes.io/projected/d4d2ad77-439d-4039-98b1-f26eadcc542e-kube-api-access-8hx28\") pod \"d4d2ad77-439d-4039-98b1-f26eadcc542e\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.998068 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ba390cc-ed29-4af9-b3b5-582f5c3de736-operator-scripts\") pod \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\" (UID: \"6ba390cc-ed29-4af9-b3b5-582f5c3de736\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.998108 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4d2ad77-439d-4039-98b1-f26eadcc542e-operator-scripts\") pod \"d4d2ad77-439d-4039-98b1-f26eadcc542e\" (UID: \"d4d2ad77-439d-4039-98b1-f26eadcc542e\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.998762 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-operator-scripts\") pod \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\" (UID: \"3ffbf750-9ce8-454e-a031-eb09ebaa34a5\") " Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.998973 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ba390cc-ed29-4af9-b3b5-582f5c3de736-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6ba390cc-ed29-4af9-b3b5-582f5c3de736" (UID: "6ba390cc-ed29-4af9-b3b5-582f5c3de736"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.999392 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3ffbf750-9ce8-454e-a031-eb09ebaa34a5" (UID: "3ffbf750-9ce8-454e-a031-eb09ebaa34a5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.999736 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.999895 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hkfd\" (UniqueName: \"kubernetes.io/projected/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b-kube-api-access-8hkfd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:46 crc kubenswrapper[4812]: I1125 17:05:46.999984 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ba390cc-ed29-4af9-b3b5-582f5c3de736-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.000077 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54278\" (UniqueName: \"kubernetes.io/projected/941bf985-0c3b-4972-b4fe-182f891de32f-kube-api-access-54278\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.000596 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4d2ad77-439d-4039-98b1-f26eadcc542e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d4d2ad77-439d-4039-98b1-f26eadcc542e" (UID: "d4d2ad77-439d-4039-98b1-f26eadcc542e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.003184 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-kube-api-access-gz9bc" (OuterVolumeSpecName: "kube-api-access-gz9bc") pod "3ffbf750-9ce8-454e-a031-eb09ebaa34a5" (UID: "3ffbf750-9ce8-454e-a031-eb09ebaa34a5"). InnerVolumeSpecName "kube-api-access-gz9bc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.004960 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ba390cc-ed29-4af9-b3b5-582f5c3de736-kube-api-access-lklkn" (OuterVolumeSpecName: "kube-api-access-lklkn") pod "6ba390cc-ed29-4af9-b3b5-582f5c3de736" (UID: "6ba390cc-ed29-4af9-b3b5-582f5c3de736"). InnerVolumeSpecName "kube-api-access-lklkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.005663 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4d2ad77-439d-4039-98b1-f26eadcc542e-kube-api-access-8hx28" (OuterVolumeSpecName: "kube-api-access-8hx28") pod "d4d2ad77-439d-4039-98b1-f26eadcc542e" (UID: "d4d2ad77-439d-4039-98b1-f26eadcc542e"). InnerVolumeSpecName "kube-api-access-8hx28". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.033575 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2tnh5" event={"ID":"941bf985-0c3b-4972-b4fe-182f891de32f","Type":"ContainerDied","Data":"7cdecdc75eb685899b1a43dc0d397d193262e8ffc43709daf13ce9bb38aafcef"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.033619 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cdecdc75eb685899b1a43dc0d397d193262e8ffc43709daf13ce9bb38aafcef" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.033619 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2tnh5" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.040117 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cff9-account-create-772bb" event={"ID":"6ba390cc-ed29-4af9-b3b5-582f5c3de736","Type":"ContainerDied","Data":"5f08d2159fbaf2cae176591862f8f2fc2cb015d93abd97b137e88ac0119235ef"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.040156 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f08d2159fbaf2cae176591862f8f2fc2cb015d93abd97b137e88ac0119235ef" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.040224 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cff9-account-create-772bb" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.047337 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pgm8z" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.047638 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pgm8z" event={"ID":"64f71340-850a-4cf0-880a-5760dc7e1c4f","Type":"ContainerDied","Data":"ef44624e7ae9e8cd285d9333fe21d50d5d1fefd1f5fe99d126e82a5833f1d720"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.047685 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef44624e7ae9e8cd285d9333fe21d50d5d1fefd1f5fe99d126e82a5833f1d720" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.052452 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerStarted","Data":"294794b54f41656c5ee39a3701d8c62a39507c75c9bf7b773a6367db763ed660"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.062226 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-66bc-account-create-9fxvs" event={"ID":"d4d2ad77-439d-4039-98b1-f26eadcc542e","Type":"ContainerDied","Data":"591f39bfb523b820b456b12adf53b3adc95bed5ce69597ef5149c29955420b7a"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.062277 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="591f39bfb523b820b456b12adf53b3adc95bed5ce69597ef5149c29955420b7a" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.062247 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-66bc-account-create-9fxvs" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.063593 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8jrgd" event={"ID":"3ffbf750-9ce8-454e-a031-eb09ebaa34a5","Type":"ContainerDied","Data":"5d49374e68ba18cb3dd618408a975deaf861b04b8daba9b2468d93a4aaf1cd6d"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.063631 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d49374e68ba18cb3dd618408a975deaf861b04b8daba9b2468d93a4aaf1cd6d" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.064991 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8jrgd" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.071344 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2c67-account-create-2dzrx" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.071992 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2c67-account-create-2dzrx" event={"ID":"0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b","Type":"ContainerDied","Data":"b132f1437153b701d0b676877f918b592e0316fe0909aefb1a6d82130f4cbe2c"} Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.072032 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b132f1437153b701d0b676877f918b592e0316fe0909aefb1a6d82130f4cbe2c" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.102083 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lklkn\" (UniqueName: \"kubernetes.io/projected/6ba390cc-ed29-4af9-b3b5-582f5c3de736-kube-api-access-lklkn\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.102117 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hx28\" (UniqueName: \"kubernetes.io/projected/d4d2ad77-439d-4039-98b1-f26eadcc542e-kube-api-access-8hx28\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.102128 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4d2ad77-439d-4039-98b1-f26eadcc542e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.102136 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.102145 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz9bc\" (UniqueName: \"kubernetes.io/projected/3ffbf750-9ce8-454e-a031-eb09ebaa34a5-kube-api-access-gz9bc\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:47 crc kubenswrapper[4812]: I1125 17:05:47.391910 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:05:48 crc kubenswrapper[4812]: I1125 17:05:48.081220 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"eeb4b7ce-d812-43f8-a575-683af1499cfa","Type":"ContainerStarted","Data":"360c6b095ccbe75cd84633eebc1cb4dcf1ac7ea913b584ad817498448646f96e"} Nov 25 17:05:48 crc kubenswrapper[4812]: I1125 17:05:48.117679 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" 
podStartSLOduration=5.117657203 podStartE2EDuration="5.117657203s" podCreationTimestamp="2025-11-25 17:05:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:05:48.108178197 +0000 UTC m=+1122.948320292" watchObservedRunningTime="2025-11-25 17:05:48.117657203 +0000 UTC m=+1122.957799298" Nov 25 17:05:49 crc kubenswrapper[4812]: I1125 17:05:49.091238 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerStarted","Data":"1466354702b50330746721bcd5f88e45cb8be6b54055d321e60bee7ecb381af1"} Nov 25 17:05:49 crc kubenswrapper[4812]: I1125 17:05:49.482079 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.945622 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mg5b8"] Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946272 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="941bf985-0c3b-4972-b4fe-182f891de32f" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946289 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="941bf985-0c3b-4972-b4fe-182f891de32f" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946304 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-api" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946310 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-api" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946324 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ffbf750-9ce8-454e-a031-eb09ebaa34a5" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946330 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ffbf750-9ce8-454e-a031-eb09ebaa34a5" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946337 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-httpd" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946342 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-httpd" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946357 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946363 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946380 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64f71340-850a-4cf0-880a-5760dc7e1c4f" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946385 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="64f71340-850a-4cf0-880a-5760dc7e1c4f" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946396 4812 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="d4d2ad77-439d-4039-98b1-f26eadcc542e" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946402 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4d2ad77-439d-4039-98b1-f26eadcc542e" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: E1125 17:05:50.946415 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ba390cc-ed29-4af9-b3b5-582f5c3de736" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946423 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ba390cc-ed29-4af9-b3b5-582f5c3de736" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946589 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946600 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="941bf985-0c3b-4972-b4fe-182f891de32f" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946609 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4d2ad77-439d-4039-98b1-f26eadcc542e" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946621 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-httpd" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946633 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ba390cc-ed29-4af9-b3b5-582f5c3de736" containerName="mariadb-account-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946642 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="aff51d33-8e62-4c99-bc89-c6e53270b60c" containerName="neutron-api" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946652 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ffbf750-9ce8-454e-a031-eb09ebaa34a5" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.946664 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="64f71340-850a-4cf0-880a-5760dc7e1c4f" containerName="mariadb-database-create" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.947212 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.952717 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.952876 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.953054 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8hgbb" Nov 25 17:05:50 crc kubenswrapper[4812]: I1125 17:05:50.961640 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mg5b8"] Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.079815 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkpsb\" (UniqueName: \"kubernetes.io/projected/d944d86b-9bc4-4360-89ce-07220fc618ea-kube-api-access-nkpsb\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.080140 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-scripts\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.080188 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.080241 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-config-data\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.113443 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerStarted","Data":"cd70d97afa74d43f48ebf251ea8e6b75cfbf7f20e675fb567310272f43d8221f"} Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.113663 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-central-agent" containerID="cri-o://ecf80a1d993c72fc1b003327ae06d9881adae76bb823c11909228e138c25cc50" gracePeriod=30 Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.113944 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.114212 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="proxy-httpd" 
containerID="cri-o://cd70d97afa74d43f48ebf251ea8e6b75cfbf7f20e675fb567310272f43d8221f" gracePeriod=30 Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.114277 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="sg-core" containerID="cri-o://1466354702b50330746721bcd5f88e45cb8be6b54055d321e60bee7ecb381af1" gracePeriod=30 Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.114330 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-notification-agent" containerID="cri-o://294794b54f41656c5ee39a3701d8c62a39507c75c9bf7b773a6367db763ed660" gracePeriod=30 Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.182065 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkpsb\" (UniqueName: \"kubernetes.io/projected/d944d86b-9bc4-4360-89ce-07220fc618ea-kube-api-access-nkpsb\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.182116 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-scripts\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.182152 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.182195 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-config-data\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.190311 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-scripts\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.190382 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-config-data\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.190589 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " 
pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.202091 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkpsb\" (UniqueName: \"kubernetes.io/projected/d944d86b-9bc4-4360-89ce-07220fc618ea-kube-api-access-nkpsb\") pod \"nova-cell0-conductor-db-sync-mg5b8\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.262678 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.704855 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.813294426 podStartE2EDuration="7.704835426s" podCreationTimestamp="2025-11-25 17:05:44 +0000 UTC" firstStartedPulling="2025-11-25 17:05:45.117372943 +0000 UTC m=+1119.957515038" lastFinishedPulling="2025-11-25 17:05:50.008913933 +0000 UTC m=+1124.849056038" observedRunningTime="2025-11-25 17:05:51.143728058 +0000 UTC m=+1125.983870143" watchObservedRunningTime="2025-11-25 17:05:51.704835426 +0000 UTC m=+1126.544977531" Nov 25 17:05:51 crc kubenswrapper[4812]: W1125 17:05:51.714890 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd944d86b_9bc4_4360_89ce_07220fc618ea.slice/crio-6358489253c9ed12601085388087ade007d044d21989f58649ef3f5ba374f033 WatchSource:0}: Error finding container 6358489253c9ed12601085388087ade007d044d21989f58649ef3f5ba374f033: Status 404 returned error can't find the container with id 6358489253c9ed12601085388087ade007d044d21989f58649ef3f5ba374f033 Nov 25 17:05:51 crc kubenswrapper[4812]: I1125 17:05:51.719089 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mg5b8"] Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.122316 4812 generic.go:334] "Generic (PLEG): container finished" podID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerID="cd70d97afa74d43f48ebf251ea8e6b75cfbf7f20e675fb567310272f43d8221f" exitCode=0 Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.122652 4812 generic.go:334] "Generic (PLEG): container finished" podID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerID="1466354702b50330746721bcd5f88e45cb8be6b54055d321e60bee7ecb381af1" exitCode=2 Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.122663 4812 generic.go:334] "Generic (PLEG): container finished" podID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerID="294794b54f41656c5ee39a3701d8c62a39507c75c9bf7b773a6367db763ed660" exitCode=0 Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.122392 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerDied","Data":"cd70d97afa74d43f48ebf251ea8e6b75cfbf7f20e675fb567310272f43d8221f"} Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.122731 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerDied","Data":"1466354702b50330746721bcd5f88e45cb8be6b54055d321e60bee7ecb381af1"} Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.122746 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerDied","Data":"294794b54f41656c5ee39a3701d8c62a39507c75c9bf7b773a6367db763ed660"} Nov 25 17:05:52 crc kubenswrapper[4812]: I1125 17:05:52.123554 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" event={"ID":"d944d86b-9bc4-4360-89ce-07220fc618ea","Type":"ContainerStarted","Data":"6358489253c9ed12601085388087ade007d044d21989f58649ef3f5ba374f033"} Nov 25 17:05:54 crc kubenswrapper[4812]: I1125 17:05:54.428434 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 17:05:54 crc kubenswrapper[4812]: I1125 17:05:54.738258 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 17:05:55 crc kubenswrapper[4812]: I1125 17:05:55.151576 4812 generic.go:334] "Generic (PLEG): container finished" podID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerID="ecf80a1d993c72fc1b003327ae06d9881adae76bb823c11909228e138c25cc50" exitCode=0 Nov 25 17:05:55 crc kubenswrapper[4812]: I1125 17:05:55.151642 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerDied","Data":"ecf80a1d993c72fc1b003327ae06d9881adae76bb823c11909228e138c25cc50"} Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.349789 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.531091 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-ceilometer-tls-certs\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.531873 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-scripts\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.531984 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-run-httpd\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.532031 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-combined-ca-bundle\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.532057 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-config-data\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.532198 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-sg-core-conf-yaml\") 
pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.532222 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wc6w\" (UniqueName: \"kubernetes.io/projected/a19d6900-fbec-4d99-9a22-6574b637ab4c-kube-api-access-7wc6w\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.532271 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-log-httpd\") pod \"a19d6900-fbec-4d99-9a22-6574b637ab4c\" (UID: \"a19d6900-fbec-4d99-9a22-6574b637ab4c\") " Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.532802 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.533284 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.534073 4812 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.534089 4812 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a19d6900-fbec-4d99-9a22-6574b637ab4c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.538800 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-scripts" (OuterVolumeSpecName: "scripts") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.549763 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a19d6900-fbec-4d99-9a22-6574b637ab4c-kube-api-access-7wc6w" (OuterVolumeSpecName: "kube-api-access-7wc6w") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "kube-api-access-7wc6w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.562128 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.588938 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.602559 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.633819 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-config-data" (OuterVolumeSpecName: "config-data") pod "a19d6900-fbec-4d99-9a22-6574b637ab4c" (UID: "a19d6900-fbec-4d99-9a22-6574b637ab4c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.635954 4812 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.635983 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.635995 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.636006 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.636017 4812 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a19d6900-fbec-4d99-9a22-6574b637ab4c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 17:05:59 crc kubenswrapper[4812]: I1125 17:05:59.636028 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wc6w\" (UniqueName: \"kubernetes.io/projected/a19d6900-fbec-4d99-9a22-6574b637ab4c-kube-api-access-7wc6w\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.220769 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.220782 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a19d6900-fbec-4d99-9a22-6574b637ab4c","Type":"ContainerDied","Data":"12a83b9e9bcef1ec0cb0a7798bbf853bc533a5b14a9ce4c50619969afed92973"} Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.220888 4812 scope.go:117] "RemoveContainer" containerID="cd70d97afa74d43f48ebf251ea8e6b75cfbf7f20e675fb567310272f43d8221f" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.223159 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" event={"ID":"d944d86b-9bc4-4360-89ce-07220fc618ea","Type":"ContainerStarted","Data":"de61b62cf3c6fbc1ac37cc3446a1c1d41accc0290a006a6d26dbae2ed0272711"} Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.244894 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.246421 4812 scope.go:117] "RemoveContainer" containerID="1466354702b50330746721bcd5f88e45cb8be6b54055d321e60bee7ecb381af1" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.255992 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.266604 4812 scope.go:117] "RemoveContainer" containerID="294794b54f41656c5ee39a3701d8c62a39507c75c9bf7b773a6367db763ed660" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.275930 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" podStartSLOduration=2.024753172 podStartE2EDuration="10.275901469s" podCreationTimestamp="2025-11-25 17:05:50 +0000 UTC" firstStartedPulling="2025-11-25 17:05:51.717869138 +0000 UTC m=+1126.558011233" lastFinishedPulling="2025-11-25 17:05:59.969017445 +0000 UTC m=+1134.809159530" observedRunningTime="2025-11-25 17:06:00.264962864 +0000 UTC m=+1135.105104959" watchObservedRunningTime="2025-11-25 17:06:00.275901469 +0000 UTC m=+1135.116043564" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.288251 4812 scope.go:117] "RemoveContainer" containerID="ecf80a1d993c72fc1b003327ae06d9881adae76bb823c11909228e138c25cc50" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298025 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:00 crc kubenswrapper[4812]: E1125 17:06:00.298463 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-central-agent" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298484 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-central-agent" Nov 25 17:06:00 crc kubenswrapper[4812]: E1125 17:06:00.298506 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="sg-core" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298514 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="sg-core" Nov 25 17:06:00 crc kubenswrapper[4812]: E1125 17:06:00.298545 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="proxy-httpd" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298556 4812 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="proxy-httpd" Nov 25 17:06:00 crc kubenswrapper[4812]: E1125 17:06:00.298591 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-notification-agent" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298599 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-notification-agent" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298795 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-central-agent" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298810 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="ceilometer-notification-agent" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298840 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="proxy-httpd" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.298856 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" containerName="sg-core" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.300737 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.305082 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.305334 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.305733 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.312328 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449270 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpmpm\" (UniqueName: \"kubernetes.io/projected/59aee660-242e-4407-981d-224efed2b599-kube-api-access-bpmpm\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449338 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-run-httpd\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449372 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-log-httpd\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449392 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449443 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449481 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449512 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-config-data\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.449769 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-scripts\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551586 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpmpm\" (UniqueName: \"kubernetes.io/projected/59aee660-242e-4407-981d-224efed2b599-kube-api-access-bpmpm\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551640 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-run-httpd\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551673 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-log-httpd\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551694 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551745 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551781 4812 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551805 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-config-data\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.551840 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-scripts\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.553291 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-run-httpd\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.553334 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-log-httpd\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.558240 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-config-data\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.558471 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.558997 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.565817 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.565879 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-scripts\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.568471 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpmpm\" (UniqueName: 
\"kubernetes.io/projected/59aee660-242e-4407-981d-224efed2b599-kube-api-access-bpmpm\") pod \"ceilometer-0\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " pod="openstack/ceilometer-0" Nov 25 17:06:00 crc kubenswrapper[4812]: I1125 17:06:00.637215 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:06:01 crc kubenswrapper[4812]: I1125 17:06:01.065860 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:01 crc kubenswrapper[4812]: W1125 17:06:01.066086 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59aee660_242e_4407_981d_224efed2b599.slice/crio-afa451bca8c6864552831537d0b0fe14e02ec3f595d4d1a2de51629ab604ae1d WatchSource:0}: Error finding container afa451bca8c6864552831537d0b0fe14e02ec3f595d4d1a2de51629ab604ae1d: Status 404 returned error can't find the container with id afa451bca8c6864552831537d0b0fe14e02ec3f595d4d1a2de51629ab604ae1d Nov 25 17:06:01 crc kubenswrapper[4812]: I1125 17:06:01.105690 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:01 crc kubenswrapper[4812]: I1125 17:06:01.231176 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerStarted","Data":"afa451bca8c6864552831537d0b0fe14e02ec3f595d4d1a2de51629ab604ae1d"} Nov 25 17:06:01 crc kubenswrapper[4812]: I1125 17:06:01.843179 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a19d6900-fbec-4d99-9a22-6574b637ab4c" path="/var/lib/kubelet/pods/a19d6900-fbec-4d99-9a22-6574b637ab4c/volumes" Nov 25 17:06:02 crc kubenswrapper[4812]: I1125 17:06:02.242161 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerStarted","Data":"8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e"} Nov 25 17:06:03 crc kubenswrapper[4812]: I1125 17:06:03.262715 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerStarted","Data":"db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670"} Nov 25 17:06:04 crc kubenswrapper[4812]: I1125 17:06:04.280107 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerStarted","Data":"c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f"} Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.296682 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerStarted","Data":"88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64"} Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.296844 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-central-agent" containerID="cri-o://8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e" gracePeriod=30 Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.297146 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.297446 4812 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="proxy-httpd" containerID="cri-o://88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64" gracePeriod=30 Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.297520 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="sg-core" containerID="cri-o://c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f" gracePeriod=30 Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.297587 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-notification-agent" containerID="cri-o://db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670" gracePeriod=30 Nov 25 17:06:05 crc kubenswrapper[4812]: I1125 17:06:05.328222 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.944914308 podStartE2EDuration="5.328189974s" podCreationTimestamp="2025-11-25 17:06:00 +0000 UTC" firstStartedPulling="2025-11-25 17:06:01.068660343 +0000 UTC m=+1135.908802438" lastFinishedPulling="2025-11-25 17:06:04.451936009 +0000 UTC m=+1139.292078104" observedRunningTime="2025-11-25 17:06:05.321081812 +0000 UTC m=+1140.161223907" watchObservedRunningTime="2025-11-25 17:06:05.328189974 +0000 UTC m=+1140.168332069" Nov 25 17:06:06 crc kubenswrapper[4812]: I1125 17:06:06.307567 4812 generic.go:334] "Generic (PLEG): container finished" podID="59aee660-242e-4407-981d-224efed2b599" containerID="88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64" exitCode=0 Nov 25 17:06:06 crc kubenswrapper[4812]: I1125 17:06:06.307861 4812 generic.go:334] "Generic (PLEG): container finished" podID="59aee660-242e-4407-981d-224efed2b599" containerID="c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f" exitCode=2 Nov 25 17:06:06 crc kubenswrapper[4812]: I1125 17:06:06.307870 4812 generic.go:334] "Generic (PLEG): container finished" podID="59aee660-242e-4407-981d-224efed2b599" containerID="db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670" exitCode=0 Nov 25 17:06:06 crc kubenswrapper[4812]: I1125 17:06:06.307616 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerDied","Data":"88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64"} Nov 25 17:06:06 crc kubenswrapper[4812]: I1125 17:06:06.307898 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerDied","Data":"c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f"} Nov 25 17:06:06 crc kubenswrapper[4812]: I1125 17:06:06.307908 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerDied","Data":"db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670"} Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.030192 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199250 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpmpm\" (UniqueName: \"kubernetes.io/projected/59aee660-242e-4407-981d-224efed2b599-kube-api-access-bpmpm\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199327 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-ceilometer-tls-certs\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199358 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-run-httpd\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199424 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-log-httpd\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199460 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-sg-core-conf-yaml\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199515 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-combined-ca-bundle\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199576 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-config-data\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.199600 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-scripts\") pod \"59aee660-242e-4407-981d-224efed2b599\" (UID: \"59aee660-242e-4407-981d-224efed2b599\") " Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.200001 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.200199 4812 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.200614 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.205262 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59aee660-242e-4407-981d-224efed2b599-kube-api-access-bpmpm" (OuterVolumeSpecName: "kube-api-access-bpmpm") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "kube-api-access-bpmpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.205766 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-scripts" (OuterVolumeSpecName: "scripts") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.226045 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.243642 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.266230 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.292029 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-config-data" (OuterVolumeSpecName: "config-data") pod "59aee660-242e-4407-981d-224efed2b599" (UID: "59aee660-242e-4407-981d-224efed2b599"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302220 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpmpm\" (UniqueName: \"kubernetes.io/projected/59aee660-242e-4407-981d-224efed2b599-kube-api-access-bpmpm\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302256 4812 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302266 4812 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59aee660-242e-4407-981d-224efed2b599-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302274 4812 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302282 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302290 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.302300 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59aee660-242e-4407-981d-224efed2b599-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.329000 4812 generic.go:334] "Generic (PLEG): container finished" podID="59aee660-242e-4407-981d-224efed2b599" containerID="8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e" exitCode=0 Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.329043 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerDied","Data":"8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e"} Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.329058 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.329069 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59aee660-242e-4407-981d-224efed2b599","Type":"ContainerDied","Data":"afa451bca8c6864552831537d0b0fe14e02ec3f595d4d1a2de51629ab604ae1d"} Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.329084 4812 scope.go:117] "RemoveContainer" containerID="88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.358912 4812 scope.go:117] "RemoveContainer" containerID="c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.366022 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.373474 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.384724 4812 scope.go:117] "RemoveContainer" containerID="db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.386366 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.386771 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-notification-agent" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.386798 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-notification-agent" Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.386820 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-central-agent" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.386826 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-central-agent" Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.386853 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="sg-core" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.386859 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="sg-core" Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.386871 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="proxy-httpd" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.386876 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="proxy-httpd" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.387034 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="ceilometer-central-agent" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.387050 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="sg-core" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.387067 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="59aee660-242e-4407-981d-224efed2b599" containerName="proxy-httpd" Nov 25 
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.388685 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.390771 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.390983 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.393596 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.399160 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.410370 4812 scope.go:117] "RemoveContainer" containerID="8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.430479 4812 scope.go:117] "RemoveContainer" containerID="88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64"
Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.430884 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64\": container with ID starting with 88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64 not found: ID does not exist" containerID="88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.430941 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64"} err="failed to get container status \"88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64\": rpc error: code = NotFound desc = could not find container \"88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64\": container with ID starting with 88edba3087801ecb8ae6a9e94ca8f33037f42cc95b435d7ae3f1c8573d07cc64 not found: ID does not exist"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.430968 4812 scope.go:117] "RemoveContainer" containerID="c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f"
Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.431330 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f\": container with ID starting with c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f not found: ID does not exist" containerID="c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.431364 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f"} err="failed to get container status \"c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f\": rpc error: code = NotFound desc = could not find container \"c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f\": container with ID starting with c44b67ef467f253e0323d66f3ce0514790096b5037bbb55f50b15732bca95a5f not found: ID does not exist"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.431385 4812 scope.go:117] "RemoveContainer" containerID="db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670"
Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.431584 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670\": container with ID starting with db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670 not found: ID does not exist" containerID="db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.431630 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670"} err="failed to get container status \"db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670\": rpc error: code = NotFound desc = could not find container \"db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670\": container with ID starting with db8d01af0205be335df8f694fa59b47476d78a3081f4e872bbcec5dd644d1670 not found: ID does not exist"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.431647 4812 scope.go:117] "RemoveContainer" containerID="8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e"
Nov 25 17:06:08 crc kubenswrapper[4812]: E1125 17:06:08.431918 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e\": container with ID starting with 8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e not found: ID does not exist" containerID="8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.431943 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e"} err="failed to get container status \"8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e\": rpc error: code = NotFound desc = could not find container \"8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e\": container with ID starting with 8950ecf13067c1bda9356a9d78c647186220be2498ccd62b9899fa23b292367e not found: ID does not exist"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506522 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506603 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-scripts\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506632 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfknq\" (UniqueName: \"kubernetes.io/projected/12174231-9610-42f1-aaea-50d8aeae60a5-kube-api-access-nfknq\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506663 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-config-data\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506796 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-run-httpd\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506855 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-log-httpd\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506913 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.506962 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608169 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-log-httpd\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608238 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608268 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608315 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608342 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-scripts\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608359 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfknq\" (UniqueName: \"kubernetes.io/projected/12174231-9610-42f1-aaea-50d8aeae60a5-kube-api-access-nfknq\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608386 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-config-data\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608449 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-run-httpd\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608752 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-log-httpd\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.608861 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-run-httpd\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.612389 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.612419 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-config-data\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.613015 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-scripts\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.613166 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.613361 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0"
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.624966 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfknq\" (UniqueName: \"kubernetes.io/projected/12174231-9610-42f1-aaea-50d8aeae60a5-kube-api-access-nfknq\") pod \"ceilometer-0\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " pod="openstack/ceilometer-0" Nov 25 17:06:08 crc kubenswrapper[4812]: I1125 17:06:08.710179 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:06:09 crc kubenswrapper[4812]: I1125 17:06:09.157520 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:06:09 crc kubenswrapper[4812]: I1125 17:06:09.337024 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerStarted","Data":"d78c42dc472acfec4ee9f42102cd173032d39babdb9e2002cb50b992d5014859"} Nov 25 17:06:09 crc kubenswrapper[4812]: I1125 17:06:09.842597 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59aee660-242e-4407-981d-224efed2b599" path="/var/lib/kubelet/pods/59aee660-242e-4407-981d-224efed2b599/volumes" Nov 25 17:06:12 crc kubenswrapper[4812]: I1125 17:06:12.372357 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerStarted","Data":"cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790"} Nov 25 17:06:13 crc kubenswrapper[4812]: I1125 17:06:13.386194 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerStarted","Data":"3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7"} Nov 25 17:06:13 crc kubenswrapper[4812]: I1125 17:06:13.946440 4812 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podd9186fd5-246f-4bb3-b3b8-926d6d66ed25"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podd9186fd5-246f-4bb3-b3b8-926d6d66ed25] : Timed out while waiting for systemd to remove kubepods-besteffort-podd9186fd5_246f_4bb3_b3b8_926d6d66ed25.slice" Nov 25 17:06:14 crc kubenswrapper[4812]: I1125 17:06:14.419327 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerStarted","Data":"5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b"} Nov 25 17:06:16 crc kubenswrapper[4812]: I1125 17:06:16.438782 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerStarted","Data":"b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc"} Nov 25 17:06:16 crc kubenswrapper[4812]: I1125 17:06:16.439276 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 17:06:16 crc kubenswrapper[4812]: I1125 17:06:16.476705 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.565933297 podStartE2EDuration="8.476684777s" podCreationTimestamp="2025-11-25 17:06:08 +0000 UTC" 
firstStartedPulling="2025-11-25 17:06:09.16656677 +0000 UTC m=+1144.006708875" lastFinishedPulling="2025-11-25 17:06:16.07731826 +0000 UTC m=+1150.917460355" observedRunningTime="2025-11-25 17:06:16.473791199 +0000 UTC m=+1151.313933284" watchObservedRunningTime="2025-11-25 17:06:16.476684777 +0000 UTC m=+1151.316826882" Nov 25 17:06:19 crc kubenswrapper[4812]: I1125 17:06:19.466728 4812 generic.go:334] "Generic (PLEG): container finished" podID="d944d86b-9bc4-4360-89ce-07220fc618ea" containerID="de61b62cf3c6fbc1ac37cc3446a1c1d41accc0290a006a6d26dbae2ed0272711" exitCode=0 Nov 25 17:06:19 crc kubenswrapper[4812]: I1125 17:06:19.466912 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" event={"ID":"d944d86b-9bc4-4360-89ce-07220fc618ea","Type":"ContainerDied","Data":"de61b62cf3c6fbc1ac37cc3446a1c1d41accc0290a006a6d26dbae2ed0272711"} Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.772195 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.831426 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkpsb\" (UniqueName: \"kubernetes.io/projected/d944d86b-9bc4-4360-89ce-07220fc618ea-kube-api-access-nkpsb\") pod \"d944d86b-9bc4-4360-89ce-07220fc618ea\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.831577 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-config-data\") pod \"d944d86b-9bc4-4360-89ce-07220fc618ea\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.831607 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-combined-ca-bundle\") pod \"d944d86b-9bc4-4360-89ce-07220fc618ea\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.831635 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-scripts\") pod \"d944d86b-9bc4-4360-89ce-07220fc618ea\" (UID: \"d944d86b-9bc4-4360-89ce-07220fc618ea\") " Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.838112 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-scripts" (OuterVolumeSpecName: "scripts") pod "d944d86b-9bc4-4360-89ce-07220fc618ea" (UID: "d944d86b-9bc4-4360-89ce-07220fc618ea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.838573 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d944d86b-9bc4-4360-89ce-07220fc618ea-kube-api-access-nkpsb" (OuterVolumeSpecName: "kube-api-access-nkpsb") pod "d944d86b-9bc4-4360-89ce-07220fc618ea" (UID: "d944d86b-9bc4-4360-89ce-07220fc618ea"). InnerVolumeSpecName "kube-api-access-nkpsb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.857182 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d944d86b-9bc4-4360-89ce-07220fc618ea" (UID: "d944d86b-9bc4-4360-89ce-07220fc618ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.863224 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-config-data" (OuterVolumeSpecName: "config-data") pod "d944d86b-9bc4-4360-89ce-07220fc618ea" (UID: "d944d86b-9bc4-4360-89ce-07220fc618ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.933558 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkpsb\" (UniqueName: \"kubernetes.io/projected/d944d86b-9bc4-4360-89ce-07220fc618ea-kube-api-access-nkpsb\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.933613 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.933625 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:20 crc kubenswrapper[4812]: I1125 17:06:20.933676 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d944d86b-9bc4-4360-89ce-07220fc618ea-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.485115 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" event={"ID":"d944d86b-9bc4-4360-89ce-07220fc618ea","Type":"ContainerDied","Data":"6358489253c9ed12601085388087ade007d044d21989f58649ef3f5ba374f033"} Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.485153 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6358489253c9ed12601085388087ade007d044d21989f58649ef3f5ba374f033" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.485217 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-mg5b8" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.578230 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 17:06:21 crc kubenswrapper[4812]: E1125 17:06:21.578612 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d944d86b-9bc4-4360-89ce-07220fc618ea" containerName="nova-cell0-conductor-db-sync" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.578631 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d944d86b-9bc4-4360-89ce-07220fc618ea" containerName="nova-cell0-conductor-db-sync" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.578809 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d944d86b-9bc4-4360-89ce-07220fc618ea" containerName="nova-cell0-conductor-db-sync" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.579375 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.582752 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.584086 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8hgbb" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.591521 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.745056 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.745106 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cbhc\" (UniqueName: \"kubernetes.io/projected/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-kube-api-access-8cbhc\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.745178 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.847101 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.847877 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cbhc\" (UniqueName: \"kubernetes.io/projected/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-kube-api-access-8cbhc\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: 
I1125 17:06:21.848389 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.851388 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.852078 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.866264 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cbhc\" (UniqueName: \"kubernetes.io/projected/bfcf2a25-70f3-4ba4-b7c2-4be6797a813f-kube-api-access-8cbhc\") pod \"nova-cell0-conductor-0\" (UID: \"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f\") " pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:21 crc kubenswrapper[4812]: I1125 17:06:21.908021 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:22 crc kubenswrapper[4812]: I1125 17:06:22.318968 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 17:06:22 crc kubenswrapper[4812]: W1125 17:06:22.320584 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfcf2a25_70f3_4ba4_b7c2_4be6797a813f.slice/crio-615b9fb196c53e154b1126c9f60935b654d735ef17941f6f15eef928395079bd WatchSource:0}: Error finding container 615b9fb196c53e154b1126c9f60935b654d735ef17941f6f15eef928395079bd: Status 404 returned error can't find the container with id 615b9fb196c53e154b1126c9f60935b654d735ef17941f6f15eef928395079bd Nov 25 17:06:22 crc kubenswrapper[4812]: I1125 17:06:22.494331 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f","Type":"ContainerStarted","Data":"615b9fb196c53e154b1126c9f60935b654d735ef17941f6f15eef928395079bd"} Nov 25 17:06:23 crc kubenswrapper[4812]: I1125 17:06:23.503746 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfcf2a25-70f3-4ba4-b7c2-4be6797a813f","Type":"ContainerStarted","Data":"60d29b9374cb2d2e7fc734cb453017634fc298e53a0f7abd34f70fac691eea3f"} Nov 25 17:06:23 crc kubenswrapper[4812]: I1125 17:06:23.504102 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:23 crc kubenswrapper[4812]: I1125 17:06:23.522841 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.522820768 podStartE2EDuration="2.522820768s" podCreationTimestamp="2025-11-25 17:06:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 
17:06:23.517925676 +0000 UTC m=+1158.358067771" watchObservedRunningTime="2025-11-25 17:06:23.522820768 +0000 UTC m=+1158.362962863" Nov 25 17:06:31 crc kubenswrapper[4812]: I1125 17:06:31.938553 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.394129 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-cr76j"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.395602 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.397742 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.397907 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.407329 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-cr76j"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.520245 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.520304 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-config-data\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.520336 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-scripts\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.520461 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6zkv\" (UniqueName: \"kubernetes.io/projected/12587876-fcb8-487a-a197-0696ac90f57d-kube-api-access-v6zkv\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.541429 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.542567 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.544724 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.568789 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.623641 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6zkv\" (UniqueName: \"kubernetes.io/projected/12587876-fcb8-487a-a197-0696ac90f57d-kube-api-access-v6zkv\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.623764 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.623791 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-config-data\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.623824 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-scripts\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.629672 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.631641 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.631679 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.635939 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.645968 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-config-data\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.651588 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-scripts\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.658708 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6zkv\" (UniqueName: \"kubernetes.io/projected/12587876-fcb8-487a-a197-0696ac90f57d-kube-api-access-v6zkv\") pod \"nova-cell0-cell-mapping-cr76j\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.671046 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.714098 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.726323 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.726389 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6k29\" (UniqueName: \"kubernetes.io/projected/b77a55e6-65fa-46ab-9297-c363932f9219-kube-api-access-z6k29\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.726437 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-config-data\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.731682 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.733499 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.740197 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.740468 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.796303 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-z2wbp"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.797865 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838702 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2vv6\" (UniqueName: \"kubernetes.io/projected/c91d38e2-27e4-4f64-a8bc-84c070c8096e-kube-api-access-t2vv6\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838765 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-config-data\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838794 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpczk\" (UniqueName: \"kubernetes.io/projected/e418273d-c607-491b-aaa2-d30ee1cd1fb0-kube-api-access-kpczk\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838836 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838889 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838920 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838951 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91d38e2-27e4-4f64-a8bc-84c070c8096e-logs\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.838979 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839022 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-config\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839052 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6k29\" (UniqueName: \"kubernetes.io/projected/b77a55e6-65fa-46ab-9297-c363932f9219-kube-api-access-z6k29\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839117 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-config-data\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839147 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-dns-svc\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839189 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1a4875b-0433-4fad-91d5-3a16227555a5-logs\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839230 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-config-data\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.839312 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kvhq\" (UniqueName: \"kubernetes.io/projected/f1a4875b-0433-4fad-91d5-3a16227555a5-kube-api-access-2kvhq\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.851355 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.855553 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-config-data\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.872546 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-z2wbp"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.895163 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6k29\" (UniqueName: \"kubernetes.io/projected/b77a55e6-65fa-46ab-9297-c363932f9219-kube-api-access-z6k29\") pod \"nova-scheduler-0\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.897793 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.899311 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.902335 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.918856 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.941845 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.941904 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kvhq\" (UniqueName: \"kubernetes.io/projected/f1a4875b-0433-4fad-91d5-3a16227555a5-kube-api-access-2kvhq\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.941931 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2vv6\" (UniqueName: \"kubernetes.io/projected/c91d38e2-27e4-4f64-a8bc-84c070c8096e-kube-api-access-t2vv6\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.941950 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-config-data\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.941966 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpczk\" (UniqueName: \"kubernetes.io/projected/e418273d-c607-491b-aaa2-d30ee1cd1fb0-kube-api-access-kpczk\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: 
\"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.941993 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.942029 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.942049 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91d38e2-27e4-4f64-a8bc-84c070c8096e-logs\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.942064 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.942970 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-config\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.943043 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-dns-svc\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.943060 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1a4875b-0433-4fad-91d5-3a16227555a5-logs\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.943099 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-config-data\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.944131 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1a4875b-0433-4fad-91d5-3a16227555a5-logs\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.944280 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91d38e2-27e4-4f64-a8bc-84c070c8096e-logs\") pod 
\"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.944305 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-config\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.946018 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.947507 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-config-data\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.949237 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.949323 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-config-data\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.949957 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.951243 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.951971 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-dns-svc\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.972180 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kvhq\" (UniqueName: \"kubernetes.io/projected/f1a4875b-0433-4fad-91d5-3a16227555a5-kube-api-access-2kvhq\") pod \"nova-api-0\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " pod="openstack/nova-api-0" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.972350 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpczk\" (UniqueName: 
\"kubernetes.io/projected/e418273d-c607-491b-aaa2-d30ee1cd1fb0-kube-api-access-kpczk\") pod \"dnsmasq-dns-566b5b7845-z2wbp\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") " pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:32 crc kubenswrapper[4812]: I1125 17:06:32.973980 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2vv6\" (UniqueName: \"kubernetes.io/projected/c91d38e2-27e4-4f64-a8bc-84c070c8096e-kube-api-access-t2vv6\") pod \"nova-metadata-0\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " pod="openstack/nova-metadata-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.045100 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.045151 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzpjv\" (UniqueName: \"kubernetes.io/projected/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-kube-api-access-fzpjv\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.045231 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.147758 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzpjv\" (UniqueName: \"kubernetes.io/projected/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-kube-api-access-fzpjv\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.147865 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.147944 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.158016 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.158319 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.164374 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.165951 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzpjv\" (UniqueName: \"kubernetes.io/projected/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-kube-api-access-fzpjv\") pod \"nova-cell1-novncproxy-0\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.210911 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.223379 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.232816 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.262293 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.334975 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2kj2k"] Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.336299 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.339894 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.344773 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.372398 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2kj2k"] Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.425871 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-cr76j"] Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.457982 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.458445 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-scripts\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.458501 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-config-data\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.458588 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqc4w\" (UniqueName: \"kubernetes.io/projected/0b37c593-c2f5-4304-bfeb-820d518bce9f-kube-api-access-pqc4w\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.560366 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqc4w\" (UniqueName: \"kubernetes.io/projected/0b37c593-c2f5-4304-bfeb-820d518bce9f-kube-api-access-pqc4w\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.560457 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.560519 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-scripts\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.560603 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-config-data\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.564422 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-config-data\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.565459 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-scripts\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.567556 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.579623 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pqc4w\" (UniqueName: \"kubernetes.io/projected/0b37c593-c2f5-4304-bfeb-820d518bce9f-kube-api-access-pqc4w\") pod \"nova-cell1-conductor-db-sync-2kj2k\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.597117 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cr76j" event={"ID":"12587876-fcb8-487a-a197-0696ac90f57d","Type":"ContainerStarted","Data":"b74c7548f3448a028713a7336bd3f0434f576132d0a83da3eacdb9e8653c6783"} Nov 25 17:06:33 crc kubenswrapper[4812]: I1125 17:06:33.686395 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.032928 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.050014 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-z2wbp"] Nov 25 17:06:34 crc kubenswrapper[4812]: W1125 17:06:34.053659 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb77a55e6_65fa_46ab_9297_c363932f9219.slice/crio-327efb9cfdb2cf3480e1bb2b135f9a35c0eedfd9d371f68c8e86430ee7bab8aa WatchSource:0}: Error finding container 327efb9cfdb2cf3480e1bb2b135f9a35c0eedfd9d371f68c8e86430ee7bab8aa: Status 404 returned error can't find the container with id 327efb9cfdb2cf3480e1bb2b135f9a35c0eedfd9d371f68c8e86430ee7bab8aa Nov 25 17:06:34 crc kubenswrapper[4812]: W1125 17:06:34.058456 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode418273d_c607_491b_aaa2_d30ee1cd1fb0.slice/crio-c6fee354d96d2627053a1602ca8241d7fb9809895ca452de75f3f2189eca7017 WatchSource:0}: Error finding container c6fee354d96d2627053a1602ca8241d7fb9809895ca452de75f3f2189eca7017: Status 404 returned error can't find the container with id c6fee354d96d2627053a1602ca8241d7fb9809895ca452de75f3f2189eca7017 Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.078592 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.099607 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.116372 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:06:34 crc kubenswrapper[4812]: W1125 17:06:34.146098 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1a4875b_0433_4fad_91d5_3a16227555a5.slice/crio-dda836ced9704b1ed876c12e7b9f9fbac532c1811bf543bf7161cb6bfd469749 WatchSource:0}: Error finding container dda836ced9704b1ed876c12e7b9f9fbac532c1811bf543bf7161cb6bfd469749: Status 404 returned error can't find the container with id dda836ced9704b1ed876c12e7b9f9fbac532c1811bf543bf7161cb6bfd469749 Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.217926 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2kj2k"] Nov 25 17:06:34 crc kubenswrapper[4812]: W1125 17:06:34.222390 4812 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b37c593_c2f5_4304_bfeb_820d518bce9f.slice/crio-0987aee6bc6ed93dfa52029779a793a3f6f780e83b5bca553179465d7ca1d0ca WatchSource:0}: Error finding container 0987aee6bc6ed93dfa52029779a793a3f6f780e83b5bca553179465d7ca1d0ca: Status 404 returned error can't find the container with id 0987aee6bc6ed93dfa52029779a793a3f6f780e83b5bca553179465d7ca1d0ca Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.606009 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b77a55e6-65fa-46ab-9297-c363932f9219","Type":"ContainerStarted","Data":"327efb9cfdb2cf3480e1bb2b135f9a35c0eedfd9d371f68c8e86430ee7bab8aa"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.607783 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" event={"ID":"0b37c593-c2f5-4304-bfeb-820d518bce9f","Type":"ContainerStarted","Data":"a2a5f55ae19d352942c7ceb265494110ba04e0647736567a174f341cfda71271"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.607810 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" event={"ID":"0b37c593-c2f5-4304-bfeb-820d518bce9f","Type":"ContainerStarted","Data":"0987aee6bc6ed93dfa52029779a793a3f6f780e83b5bca553179465d7ca1d0ca"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.610508 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cr76j" event={"ID":"12587876-fcb8-487a-a197-0696ac90f57d","Type":"ContainerStarted","Data":"05e90a883068b3bbd7169aa3a5de41d93dd52038953f351098c8e81af14c0bf9"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.611643 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c91d38e2-27e4-4f64-a8bc-84c070c8096e","Type":"ContainerStarted","Data":"b545c13d1a9b8ead2f7ad02c47c24ecfcb25917c6631f2e4dae4215746adeda0"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.612867 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1a4875b-0433-4fad-91d5-3a16227555a5","Type":"ContainerStarted","Data":"dda836ced9704b1ed876c12e7b9f9fbac532c1811bf543bf7161cb6bfd469749"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.614109 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"22e89ee4-7aad-4b7b-a315-14593ff5ddfb","Type":"ContainerStarted","Data":"ab8c13fecd0d2dffd226173c247b5becd7073e84d35525992811ce6d6af20707"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.615936 4812 generic.go:334] "Generic (PLEG): container finished" podID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerID="2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0" exitCode=0 Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.616250 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" event={"ID":"e418273d-c607-491b-aaa2-d30ee1cd1fb0","Type":"ContainerDied","Data":"2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.616286 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" event={"ID":"e418273d-c607-491b-aaa2-d30ee1cd1fb0","Type":"ContainerStarted","Data":"c6fee354d96d2627053a1602ca8241d7fb9809895ca452de75f3f2189eca7017"} Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.630191 4812 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" podStartSLOduration=1.630174008 podStartE2EDuration="1.630174008s" podCreationTimestamp="2025-11-25 17:06:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:06:34.626875407 +0000 UTC m=+1169.467017502" watchObservedRunningTime="2025-11-25 17:06:34.630174008 +0000 UTC m=+1169.470316103" Nov 25 17:06:34 crc kubenswrapper[4812]: I1125 17:06:34.686544 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-cr76j" podStartSLOduration=2.686505925 podStartE2EDuration="2.686505925s" podCreationTimestamp="2025-11-25 17:06:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:06:34.682781003 +0000 UTC m=+1169.522923098" watchObservedRunningTime="2025-11-25 17:06:34.686505925 +0000 UTC m=+1169.526648030" Nov 25 17:06:35 crc kubenswrapper[4812]: I1125 17:06:35.636050 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" event={"ID":"e418273d-c607-491b-aaa2-d30ee1cd1fb0","Type":"ContainerStarted","Data":"0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5"} Nov 25 17:06:35 crc kubenswrapper[4812]: I1125 17:06:35.636524 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:35 crc kubenswrapper[4812]: I1125 17:06:35.654660 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" podStartSLOduration=3.654643257 podStartE2EDuration="3.654643257s" podCreationTimestamp="2025-11-25 17:06:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:06:35.653922348 +0000 UTC m=+1170.494064443" watchObservedRunningTime="2025-11-25 17:06:35.654643257 +0000 UTC m=+1170.494785352" Nov 25 17:06:36 crc kubenswrapper[4812]: I1125 17:06:36.115842 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:06:36 crc kubenswrapper[4812]: I1125 17:06:36.124597 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.660335 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b77a55e6-65fa-46ab-9297-c363932f9219","Type":"ContainerStarted","Data":"0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58"} Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.672968 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c91d38e2-27e4-4f64-a8bc-84c070c8096e","Type":"ContainerStarted","Data":"96b74e481278bc080f065227435d8424f93c75de3c3152f7c4c2a1dcb1fe9c1a"} Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.673215 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c91d38e2-27e4-4f64-a8bc-84c070c8096e","Type":"ContainerStarted","Data":"6d0325b0e51e576f2e4fa22be98f5c7f4a678983a5954059d3980ad0e5f95d96"} Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.673141 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" 
podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-metadata" containerID="cri-o://96b74e481278bc080f065227435d8424f93c75de3c3152f7c4c2a1dcb1fe9c1a" gracePeriod=30 Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.673117 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-log" containerID="cri-o://6d0325b0e51e576f2e4fa22be98f5c7f4a678983a5954059d3980ad0e5f95d96" gracePeriod=30 Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.681020 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1a4875b-0433-4fad-91d5-3a16227555a5","Type":"ContainerStarted","Data":"4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b"} Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.681067 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1a4875b-0433-4fad-91d5-3a16227555a5","Type":"ContainerStarted","Data":"60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e"} Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.682659 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"22e89ee4-7aad-4b7b-a315-14593ff5ddfb","Type":"ContainerStarted","Data":"3b7eb54e380529cdc8d9fa51b54e901d06dffcafa5eb6fba1c3e1efe71d8e771"} Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.682719 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="22e89ee4-7aad-4b7b-a315-14593ff5ddfb" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://3b7eb54e380529cdc8d9fa51b54e901d06dffcafa5eb6fba1c3e1efe71d8e771" gracePeriod=30 Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.688239 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.694932845 podStartE2EDuration="5.688216378s" podCreationTimestamp="2025-11-25 17:06:32 +0000 UTC" firstStartedPulling="2025-11-25 17:06:34.062076163 +0000 UTC m=+1168.902218258" lastFinishedPulling="2025-11-25 17:06:37.055359696 +0000 UTC m=+1171.895501791" observedRunningTime="2025-11-25 17:06:37.685872634 +0000 UTC m=+1172.526014749" watchObservedRunningTime="2025-11-25 17:06:37.688216378 +0000 UTC m=+1172.528358483" Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.709076 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.790701868 podStartE2EDuration="5.709055287s" podCreationTimestamp="2025-11-25 17:06:32 +0000 UTC" firstStartedPulling="2025-11-25 17:06:34.135452735 +0000 UTC m=+1168.975594830" lastFinishedPulling="2025-11-25 17:06:37.053806154 +0000 UTC m=+1171.893948249" observedRunningTime="2025-11-25 17:06:37.702796886 +0000 UTC m=+1172.542938981" watchObservedRunningTime="2025-11-25 17:06:37.709055287 +0000 UTC m=+1172.549197382" Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.727848 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.875120553 podStartE2EDuration="5.727822609s" podCreationTimestamp="2025-11-25 17:06:32 +0000 UTC" firstStartedPulling="2025-11-25 17:06:34.193779717 +0000 UTC m=+1169.033921812" lastFinishedPulling="2025-11-25 17:06:37.046481773 +0000 UTC m=+1171.886623868" observedRunningTime="2025-11-25 17:06:37.72089294 +0000 UTC 
m=+1172.561035035" watchObservedRunningTime="2025-11-25 17:06:37.727822609 +0000 UTC m=+1172.567964704" Nov 25 17:06:37 crc kubenswrapper[4812]: I1125 17:06:37.746548 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.834931266 podStartE2EDuration="5.746507089s" podCreationTimestamp="2025-11-25 17:06:32 +0000 UTC" firstStartedPulling="2025-11-25 17:06:34.134923451 +0000 UTC m=+1168.975065556" lastFinishedPulling="2025-11-25 17:06:37.046499284 +0000 UTC m=+1171.886641379" observedRunningTime="2025-11-25 17:06:37.741038959 +0000 UTC m=+1172.581181054" watchObservedRunningTime="2025-11-25 17:06:37.746507089 +0000 UTC m=+1172.586649184" Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.164943 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.211217 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.211273 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.262749 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.693860 4812 generic.go:334] "Generic (PLEG): container finished" podID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerID="6d0325b0e51e576f2e4fa22be98f5c7f4a678983a5954059d3980ad0e5f95d96" exitCode=143 Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.693934 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c91d38e2-27e4-4f64-a8bc-84c070c8096e","Type":"ContainerDied","Data":"6d0325b0e51e576f2e4fa22be98f5c7f4a678983a5954059d3980ad0e5f95d96"} Nov 25 17:06:38 crc kubenswrapper[4812]: I1125 17:06:38.717719 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 17:06:41 crc kubenswrapper[4812]: I1125 17:06:41.736667 4812 generic.go:334] "Generic (PLEG): container finished" podID="12587876-fcb8-487a-a197-0696ac90f57d" containerID="05e90a883068b3bbd7169aa3a5de41d93dd52038953f351098c8e81af14c0bf9" exitCode=0 Nov 25 17:06:41 crc kubenswrapper[4812]: I1125 17:06:41.736778 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-cr76j" event={"ID":"12587876-fcb8-487a-a197-0696ac90f57d","Type":"ContainerDied","Data":"05e90a883068b3bbd7169aa3a5de41d93dd52038953f351098c8e81af14c0bf9"} Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.058592 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.150956 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-scripts\") pod \"12587876-fcb8-487a-a197-0696ac90f57d\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.151056 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-config-data\") pod \"12587876-fcb8-487a-a197-0696ac90f57d\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.151102 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6zkv\" (UniqueName: \"kubernetes.io/projected/12587876-fcb8-487a-a197-0696ac90f57d-kube-api-access-v6zkv\") pod \"12587876-fcb8-487a-a197-0696ac90f57d\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.151144 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-combined-ca-bundle\") pod \"12587876-fcb8-487a-a197-0696ac90f57d\" (UID: \"12587876-fcb8-487a-a197-0696ac90f57d\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.156290 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12587876-fcb8-487a-a197-0696ac90f57d-kube-api-access-v6zkv" (OuterVolumeSpecName: "kube-api-access-v6zkv") pod "12587876-fcb8-487a-a197-0696ac90f57d" (UID: "12587876-fcb8-487a-a197-0696ac90f57d"). InnerVolumeSpecName "kube-api-access-v6zkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.156652 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-scripts" (OuterVolumeSpecName: "scripts") pod "12587876-fcb8-487a-a197-0696ac90f57d" (UID: "12587876-fcb8-487a-a197-0696ac90f57d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.165918 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.176954 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12587876-fcb8-487a-a197-0696ac90f57d" (UID: "12587876-fcb8-487a-a197-0696ac90f57d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.178344 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-config-data" (OuterVolumeSpecName: "config-data") pod "12587876-fcb8-487a-a197-0696ac90f57d" (UID: "12587876-fcb8-487a-a197-0696ac90f57d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.196257 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.224378 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.224439 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.235704 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.254022 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.254056 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.254093 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6zkv\" (UniqueName: \"kubernetes.io/projected/12587876-fcb8-487a-a197-0696ac90f57d-kube-api-access-v6zkv\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.254118 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12587876-fcb8-487a-a197-0696ac90f57d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.315185 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-55tn6"] Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.315805 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" podUID="75972e9f-7329-4346-b291-05533c8a926a" containerName="dnsmasq-dns" containerID="cri-o://4eb57618a7cddfb38b8cade847a6ed872d79c39bed36ed8694d9559a2bde3b39" gracePeriod=10 Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.760689 4812 generic.go:334] "Generic (PLEG): container finished" podID="75972e9f-7329-4346-b291-05533c8a926a" containerID="4eb57618a7cddfb38b8cade847a6ed872d79c39bed36ed8694d9559a2bde3b39" exitCode=0 Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.760760 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" event={"ID":"75972e9f-7329-4346-b291-05533c8a926a","Type":"ContainerDied","Data":"4eb57618a7cddfb38b8cade847a6ed872d79c39bed36ed8694d9559a2bde3b39"} Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.760788 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" event={"ID":"75972e9f-7329-4346-b291-05533c8a926a","Type":"ContainerDied","Data":"6769a4849d582c61bd9fef59a1af825939d678e16be9d7f98a4388f55a8d958c"} Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.760797 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6769a4849d582c61bd9fef59a1af825939d678e16be9d7f98a4388f55a8d958c" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.765517 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-cell-mapping-cr76j" event={"ID":"12587876-fcb8-487a-a197-0696ac90f57d","Type":"ContainerDied","Data":"b74c7548f3448a028713a7336bd3f0434f576132d0a83da3eacdb9e8653c6783"} Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.765703 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b74c7548f3448a028713a7336bd3f0434f576132d0a83da3eacdb9e8653c6783" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.766045 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-cr76j" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.766617 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.830616 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.869460 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-dns-svc\") pod \"75972e9f-7329-4346-b291-05533c8a926a\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.869854 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-nb\") pod \"75972e9f-7329-4346-b291-05533c8a926a\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.869945 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-sb\") pod \"75972e9f-7329-4346-b291-05533c8a926a\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.870072 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-config\") pod \"75972e9f-7329-4346-b291-05533c8a926a\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.870175 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjvvl\" (UniqueName: \"kubernetes.io/projected/75972e9f-7329-4346-b291-05533c8a926a-kube-api-access-fjvvl\") pod \"75972e9f-7329-4346-b291-05533c8a926a\" (UID: \"75972e9f-7329-4346-b291-05533c8a926a\") " Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.874677 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75972e9f-7329-4346-b291-05533c8a926a-kube-api-access-fjvvl" (OuterVolumeSpecName: "kube-api-access-fjvvl") pod "75972e9f-7329-4346-b291-05533c8a926a" (UID: "75972e9f-7329-4346-b291-05533c8a926a"). InnerVolumeSpecName "kube-api-access-fjvvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.920096 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "75972e9f-7329-4346-b291-05533c8a926a" (UID: "75972e9f-7329-4346-b291-05533c8a926a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.935002 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-config" (OuterVolumeSpecName: "config") pod "75972e9f-7329-4346-b291-05533c8a926a" (UID: "75972e9f-7329-4346-b291-05533c8a926a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.937109 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "75972e9f-7329-4346-b291-05533c8a926a" (UID: "75972e9f-7329-4346-b291-05533c8a926a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.937286 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.937496 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-log" containerID="cri-o://60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e" gracePeriod=30 Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.937802 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-api" containerID="cri-o://4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b" gracePeriod=30 Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.942113 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.175:8774/\": EOF" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.942135 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.175:8774/\": EOF" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.954624 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "75972e9f-7329-4346-b291-05533c8a926a" (UID: "75972e9f-7329-4346-b291-05533c8a926a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.972755 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.972788 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.972800 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.972809 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjvvl\" (UniqueName: \"kubernetes.io/projected/75972e9f-7329-4346-b291-05533c8a926a-kube-api-access-fjvvl\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:43 crc kubenswrapper[4812]: I1125 17:06:43.972818 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/75972e9f-7329-4346-b291-05533c8a926a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:44 crc kubenswrapper[4812]: I1125 17:06:44.290376 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:44 crc kubenswrapper[4812]: I1125 17:06:44.774462 4812 generic.go:334] "Generic (PLEG): container finished" podID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerID="60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e" exitCode=143 Nov 25 17:06:44 crc kubenswrapper[4812]: I1125 17:06:44.774592 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-55tn6" Nov 25 17:06:44 crc kubenswrapper[4812]: I1125 17:06:44.774599 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1a4875b-0433-4fad-91d5-3a16227555a5","Type":"ContainerDied","Data":"60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e"} Nov 25 17:06:44 crc kubenswrapper[4812]: I1125 17:06:44.810170 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-55tn6"] Nov 25 17:06:44 crc kubenswrapper[4812]: I1125 17:06:44.819056 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-55tn6"] Nov 25 17:06:45 crc kubenswrapper[4812]: I1125 17:06:45.782752 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="b77a55e6-65fa-46ab-9297-c363932f9219" containerName="nova-scheduler-scheduler" containerID="cri-o://0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" gracePeriod=30 Nov 25 17:06:45 crc kubenswrapper[4812]: I1125 17:06:45.841914 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75972e9f-7329-4346-b291-05533c8a926a" path="/var/lib/kubelet/pods/75972e9f-7329-4346-b291-05533c8a926a/volumes" Nov 25 17:06:47 crc kubenswrapper[4812]: I1125 17:06:47.809546 4812 generic.go:334] "Generic (PLEG): container finished" podID="0b37c593-c2f5-4304-bfeb-820d518bce9f" containerID="a2a5f55ae19d352942c7ceb265494110ba04e0647736567a174f341cfda71271" exitCode=0 Nov 25 17:06:47 crc kubenswrapper[4812]: I1125 17:06:47.809621 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" event={"ID":"0b37c593-c2f5-4304-bfeb-820d518bce9f","Type":"ContainerDied","Data":"a2a5f55ae19d352942c7ceb265494110ba04e0647736567a174f341cfda71271"} Nov 25 17:06:48 crc kubenswrapper[4812]: E1125 17:06:48.167584 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 17:06:48 crc kubenswrapper[4812]: E1125 17:06:48.168871 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 17:06:48 crc kubenswrapper[4812]: E1125 17:06:48.170510 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 17:06:48 crc kubenswrapper[4812]: E1125 17:06:48.170589 4812 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="b77a55e6-65fa-46ab-9297-c363932f9219" containerName="nova-scheduler-scheduler" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.143659 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.274007 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-combined-ca-bundle\") pod \"0b37c593-c2f5-4304-bfeb-820d518bce9f\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.274128 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-scripts\") pod \"0b37c593-c2f5-4304-bfeb-820d518bce9f\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.274274 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-config-data\") pod \"0b37c593-c2f5-4304-bfeb-820d518bce9f\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.274306 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqc4w\" (UniqueName: \"kubernetes.io/projected/0b37c593-c2f5-4304-bfeb-820d518bce9f-kube-api-access-pqc4w\") pod \"0b37c593-c2f5-4304-bfeb-820d518bce9f\" (UID: \"0b37c593-c2f5-4304-bfeb-820d518bce9f\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.282253 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-scripts" (OuterVolumeSpecName: "scripts") pod "0b37c593-c2f5-4304-bfeb-820d518bce9f" (UID: "0b37c593-c2f5-4304-bfeb-820d518bce9f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.283987 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b37c593-c2f5-4304-bfeb-820d518bce9f-kube-api-access-pqc4w" (OuterVolumeSpecName: "kube-api-access-pqc4w") pod "0b37c593-c2f5-4304-bfeb-820d518bce9f" (UID: "0b37c593-c2f5-4304-bfeb-820d518bce9f"). InnerVolumeSpecName "kube-api-access-pqc4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.308669 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-config-data" (OuterVolumeSpecName: "config-data") pod "0b37c593-c2f5-4304-bfeb-820d518bce9f" (UID: "0b37c593-c2f5-4304-bfeb-820d518bce9f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.316424 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b37c593-c2f5-4304-bfeb-820d518bce9f" (UID: "0b37c593-c2f5-4304-bfeb-820d518bce9f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.377999 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.378033 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqc4w\" (UniqueName: \"kubernetes.io/projected/0b37c593-c2f5-4304-bfeb-820d518bce9f-kube-api-access-pqc4w\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.378044 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.378052 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b37c593-c2f5-4304-bfeb-820d518bce9f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.448143 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.580904 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-combined-ca-bundle\") pod \"b77a55e6-65fa-46ab-9297-c363932f9219\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.581081 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6k29\" (UniqueName: \"kubernetes.io/projected/b77a55e6-65fa-46ab-9297-c363932f9219-kube-api-access-z6k29\") pod \"b77a55e6-65fa-46ab-9297-c363932f9219\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.581177 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-config-data\") pod \"b77a55e6-65fa-46ab-9297-c363932f9219\" (UID: \"b77a55e6-65fa-46ab-9297-c363932f9219\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.592743 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b77a55e6-65fa-46ab-9297-c363932f9219-kube-api-access-z6k29" (OuterVolumeSpecName: "kube-api-access-z6k29") pod "b77a55e6-65fa-46ab-9297-c363932f9219" (UID: "b77a55e6-65fa-46ab-9297-c363932f9219"). InnerVolumeSpecName "kube-api-access-z6k29". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.603397 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-config-data" (OuterVolumeSpecName: "config-data") pod "b77a55e6-65fa-46ab-9297-c363932f9219" (UID: "b77a55e6-65fa-46ab-9297-c363932f9219"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.609249 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b77a55e6-65fa-46ab-9297-c363932f9219" (UID: "b77a55e6-65fa-46ab-9297-c363932f9219"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.619993 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.683034 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.683069 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6k29\" (UniqueName: \"kubernetes.io/projected/b77a55e6-65fa-46ab-9297-c363932f9219-kube-api-access-z6k29\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.683081 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b77a55e6-65fa-46ab-9297-c363932f9219-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.784566 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-combined-ca-bundle\") pod \"f1a4875b-0433-4fad-91d5-3a16227555a5\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.784846 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-config-data\") pod \"f1a4875b-0433-4fad-91d5-3a16227555a5\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.784868 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1a4875b-0433-4fad-91d5-3a16227555a5-logs\") pod \"f1a4875b-0433-4fad-91d5-3a16227555a5\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.784909 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kvhq\" (UniqueName: \"kubernetes.io/projected/f1a4875b-0433-4fad-91d5-3a16227555a5-kube-api-access-2kvhq\") pod \"f1a4875b-0433-4fad-91d5-3a16227555a5\" (UID: \"f1a4875b-0433-4fad-91d5-3a16227555a5\") " Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.785653 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1a4875b-0433-4fad-91d5-3a16227555a5-logs" (OuterVolumeSpecName: "logs") pod "f1a4875b-0433-4fad-91d5-3a16227555a5" (UID: "f1a4875b-0433-4fad-91d5-3a16227555a5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.788350 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1a4875b-0433-4fad-91d5-3a16227555a5-kube-api-access-2kvhq" (OuterVolumeSpecName: "kube-api-access-2kvhq") pod "f1a4875b-0433-4fad-91d5-3a16227555a5" (UID: "f1a4875b-0433-4fad-91d5-3a16227555a5"). InnerVolumeSpecName "kube-api-access-2kvhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.807973 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-config-data" (OuterVolumeSpecName: "config-data") pod "f1a4875b-0433-4fad-91d5-3a16227555a5" (UID: "f1a4875b-0433-4fad-91d5-3a16227555a5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.809331 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1a4875b-0433-4fad-91d5-3a16227555a5" (UID: "f1a4875b-0433-4fad-91d5-3a16227555a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.827897 4812 generic.go:334] "Generic (PLEG): container finished" podID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerID="4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b" exitCode=0 Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.827964 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1a4875b-0433-4fad-91d5-3a16227555a5","Type":"ContainerDied","Data":"4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b"} Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.828030 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f1a4875b-0433-4fad-91d5-3a16227555a5","Type":"ContainerDied","Data":"dda836ced9704b1ed876c12e7b9f9fbac532c1811bf543bf7161cb6bfd469749"} Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.828050 4812 scope.go:117] "RemoveContainer" containerID="4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.828234 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.829693 4812 generic.go:334] "Generic (PLEG): container finished" podID="b77a55e6-65fa-46ab-9297-c363932f9219" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" exitCode=0 Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.829743 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.829736 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b77a55e6-65fa-46ab-9297-c363932f9219","Type":"ContainerDied","Data":"0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58"} Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.829856 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b77a55e6-65fa-46ab-9297-c363932f9219","Type":"ContainerDied","Data":"327efb9cfdb2cf3480e1bb2b135f9a35c0eedfd9d371f68c8e86430ee7bab8aa"} Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.832426 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.844268 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-2kj2k" event={"ID":"0b37c593-c2f5-4304-bfeb-820d518bce9f","Type":"ContainerDied","Data":"0987aee6bc6ed93dfa52029779a793a3f6f780e83b5bca553179465d7ca1d0ca"} Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.844309 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0987aee6bc6ed93dfa52029779a793a3f6f780e83b5bca553179465d7ca1d0ca" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.852843 4812 scope.go:117] "RemoveContainer" containerID="60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.874734 4812 scope.go:117] "RemoveContainer" containerID="4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.879621 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b\": container with ID starting with 4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b not found: ID does not exist" containerID="4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.879782 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b"} err="failed to get container status \"4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b\": rpc error: code = NotFound desc = could not find container \"4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b\": container with ID starting with 4f19d1ff5cdbdecc0bca55433cee8bba1181039ce7046340588cce3b6d485a6b not found: ID does not exist" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.879818 4812 scope.go:117] "RemoveContainer" containerID="60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.880255 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e\": container with ID starting with 60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e not found: ID does not exist" containerID="60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.880387 4812 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e"} err="failed to get container status \"60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e\": rpc error: code = NotFound desc = could not find container \"60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e\": container with ID starting with 60962ccbe8ba43bf6400feda28e833517a4f42b13916b020964662845c3c159e not found: ID does not exist" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.880471 4812 scope.go:117] "RemoveContainer" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.889028 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.889073 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1a4875b-0433-4fad-91d5-3a16227555a5-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.889092 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kvhq\" (UniqueName: \"kubernetes.io/projected/f1a4875b-0433-4fad-91d5-3a16227555a5-kube-api-access-2kvhq\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.889109 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1a4875b-0433-4fad-91d5-3a16227555a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.908351 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.917133 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.920485 4812 scope.go:117] "RemoveContainer" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.921125 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58\": container with ID starting with 0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58 not found: ID does not exist" containerID="0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.921175 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58"} err="failed to get container status \"0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58\": rpc error: code = NotFound desc = could not find container \"0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58\": container with ID starting with 0ce8326fd6912628dd9c45adf6fff35f1f72e0eafaf436c8c88865b03f78da58 not found: ID does not exist" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.927642 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928088 4812 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0b37c593-c2f5-4304-bfeb-820d518bce9f" containerName="nova-cell1-conductor-db-sync" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928110 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b37c593-c2f5-4304-bfeb-820d518bce9f" containerName="nova-cell1-conductor-db-sync" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928124 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75972e9f-7329-4346-b291-05533c8a926a" containerName="init" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928133 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="75972e9f-7329-4346-b291-05533c8a926a" containerName="init" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928163 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-api" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928171 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-api" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928180 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75972e9f-7329-4346-b291-05533c8a926a" containerName="dnsmasq-dns" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928187 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="75972e9f-7329-4346-b291-05533c8a926a" containerName="dnsmasq-dns" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928198 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-log" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928204 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-log" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928222 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b77a55e6-65fa-46ab-9297-c363932f9219" containerName="nova-scheduler-scheduler" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928229 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="b77a55e6-65fa-46ab-9297-c363932f9219" containerName="nova-scheduler-scheduler" Nov 25 17:06:49 crc kubenswrapper[4812]: E1125 17:06:49.928243 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12587876-fcb8-487a-a197-0696ac90f57d" containerName="nova-manage" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928250 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="12587876-fcb8-487a-a197-0696ac90f57d" containerName="nova-manage" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928474 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="75972e9f-7329-4346-b291-05533c8a926a" containerName="dnsmasq-dns" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928489 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="12587876-fcb8-487a-a197-0696ac90f57d" containerName="nova-manage" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928519 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-log" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928558 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" containerName="nova-api-api" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928572 4812 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="b77a55e6-65fa-46ab-9297-c363932f9219" containerName="nova-scheduler-scheduler" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.928592 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b37c593-c2f5-4304-bfeb-820d518bce9f" containerName="nova-cell1-conductor-db-sync" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.929275 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.932278 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.947029 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.956710 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.958574 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.961277 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.965351 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:49 crc kubenswrapper[4812]: I1125 17:06:49.992540 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.001309 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.008169 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.009286 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.011511 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.015738 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097283 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d09107-bd88-4bed-a3b3-f153248a2df0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097449 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-config-data\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097561 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvvmm\" (UniqueName: \"kubernetes.io/projected/b8d09107-bd88-4bed-a3b3-f153248a2df0-kube-api-access-rvvmm\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097616 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-logs\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097685 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z55rm\" (UniqueName: \"kubernetes.io/projected/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-kube-api-access-z55rm\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097800 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d09107-bd88-4bed-a3b3-f153248a2df0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.097887 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200095 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d09107-bd88-4bed-a3b3-f153248a2df0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200167 4812 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-config-data\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200217 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvvmm\" (UniqueName: \"kubernetes.io/projected/b8d09107-bd88-4bed-a3b3-f153248a2df0-kube-api-access-rvvmm\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200499 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-logs\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200564 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmpln\" (UniqueName: \"kubernetes.io/projected/8528f1d6-993c-4104-aa01-a67ed54fc82b-kube-api-access-mmpln\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200625 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z55rm\" (UniqueName: \"kubernetes.io/projected/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-kube-api-access-z55rm\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200846 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200863 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d09107-bd88-4bed-a3b3-f153248a2df0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.201242 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-logs\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.200879 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-config-data\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0" Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.201682 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0" 
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.204725 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-config-data\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.204737 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d09107-bd88-4bed-a3b3-f153248a2df0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.204876 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d09107-bd88-4bed-a3b3-f153248a2df0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.205322 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.216383 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z55rm\" (UniqueName: \"kubernetes.io/projected/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-kube-api-access-z55rm\") pod \"nova-api-0\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") " pod="openstack/nova-api-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.217760 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvvmm\" (UniqueName: \"kubernetes.io/projected/b8d09107-bd88-4bed-a3b3-f153248a2df0-kube-api-access-rvvmm\") pod \"nova-cell1-conductor-0\" (UID: \"b8d09107-bd88-4bed-a3b3-f153248a2df0\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.246916 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.278872 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.302942 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.303000 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-config-data\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.303162 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmpln\" (UniqueName: \"kubernetes.io/projected/8528f1d6-993c-4104-aa01-a67ed54fc82b-kube-api-access-mmpln\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.307700 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.307780 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-config-data\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.322933 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmpln\" (UniqueName: \"kubernetes.io/projected/8528f1d6-993c-4104-aa01-a67ed54fc82b-kube-api-access-mmpln\") pod \"nova-scheduler-0\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.324584 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.703797 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 17:06:50 crc kubenswrapper[4812]: W1125 17:06:50.707143 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8d09107_bd88_4bed_a3b3_f153248a2df0.slice/crio-518f5046b563754d5056298194b931b05f91790db3672ad289a9a2cea3c5d778 WatchSource:0}: Error finding container 518f5046b563754d5056298194b931b05f91790db3672ad289a9a2cea3c5d778: Status 404 returned error can't find the container with id 518f5046b563754d5056298194b931b05f91790db3672ad289a9a2cea3c5d778
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.779202 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:06:50 crc kubenswrapper[4812]: W1125 17:06:50.779865 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ac60428_2fe2_4b32_8eb4_1ede2cad4e5a.slice/crio-5499cc3423673af1eded558035ca49dcf3f0a36979f81216f23471ac33fd05a3 WatchSource:0}: Error finding container 5499cc3423673af1eded558035ca49dcf3f0a36979f81216f23471ac33fd05a3: Status 404 returned error can't find the container with id 5499cc3423673af1eded558035ca49dcf3f0a36979f81216f23471ac33fd05a3
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.841851 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8d09107-bd88-4bed-a3b3-f153248a2df0","Type":"ContainerStarted","Data":"518f5046b563754d5056298194b931b05f91790db3672ad289a9a2cea3c5d778"}
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.844833 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a","Type":"ContainerStarted","Data":"5499cc3423673af1eded558035ca49dcf3f0a36979f81216f23471ac33fd05a3"}
Nov 25 17:06:50 crc kubenswrapper[4812]: I1125 17:06:50.846961 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 17:06:50 crc kubenswrapper[4812]: W1125 17:06:50.848294 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8528f1d6_993c_4104_aa01_a67ed54fc82b.slice/crio-e599bd0dc215fe8c655c1dd57cf271e7f03053f0eb7e5ac3fe852835efeccce4 WatchSource:0}: Error finding container e599bd0dc215fe8c655c1dd57cf271e7f03053f0eb7e5ac3fe852835efeccce4: Status 404 returned error can't find the container with id e599bd0dc215fe8c655c1dd57cf271e7f03053f0eb7e5ac3fe852835efeccce4
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.843814 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b77a55e6-65fa-46ab-9297-c363932f9219" path="/var/lib/kubelet/pods/b77a55e6-65fa-46ab-9297-c363932f9219/volumes"
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.844730 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1a4875b-0433-4fad-91d5-3a16227555a5" path="/var/lib/kubelet/pods/f1a4875b-0433-4fad-91d5-3a16227555a5/volumes"
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.854021 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a","Type":"ContainerStarted","Data":"e6ab1bbd119f9db68e36d31d567fb111091b89a8a34a5fb8172d10cc35209c61"}
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.854092 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a","Type":"ContainerStarted","Data":"9de63679e6f433d98a86a745a1f7493352a013f266a1e7f28aeddf3c01df43ac"}
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.856876 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8528f1d6-993c-4104-aa01-a67ed54fc82b","Type":"ContainerStarted","Data":"628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c"}
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.856916 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8528f1d6-993c-4104-aa01-a67ed54fc82b","Type":"ContainerStarted","Data":"e599bd0dc215fe8c655c1dd57cf271e7f03053f0eb7e5ac3fe852835efeccce4"}
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.859570 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b8d09107-bd88-4bed-a3b3-f153248a2df0","Type":"ContainerStarted","Data":"337dc9043d7f40f9d55d2f463e7fd5598cb07d698229059f1adec72d618f2dec"}
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.859706 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.876712 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.876688543 podStartE2EDuration="2.876688543s" podCreationTimestamp="2025-11-25 17:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:06:51.869643781 +0000 UTC m=+1186.709785886" watchObservedRunningTime="2025-11-25 17:06:51.876688543 +0000 UTC m=+1186.716830638"
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.888091 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.888069454 podStartE2EDuration="2.888069454s" podCreationTimestamp="2025-11-25 17:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:06:51.887112548 +0000 UTC m=+1186.727254653" watchObservedRunningTime="2025-11-25 17:06:51.888069454 +0000 UTC m=+1186.728211569"
Nov 25 17:06:51 crc kubenswrapper[4812]: I1125 17:06:51.906437 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.906417255 podStartE2EDuration="2.906417255s" podCreationTimestamp="2025-11-25 17:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:06:51.901979714 +0000 UTC m=+1186.742121809" watchObservedRunningTime="2025-11-25 17:06:51.906417255 +0000 UTC m=+1186.746559350"
Nov 25 17:06:55 crc kubenswrapper[4812]: I1125 17:06:55.276625 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 25 17:06:55 crc kubenswrapper[4812]: I1125 17:06:55.324729 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 17:06:57 crc kubenswrapper[4812]: I1125 17:06:57.333013 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:06:57 crc kubenswrapper[4812]: I1125 17:06:57.333826 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:07:00 crc kubenswrapper[4812]: I1125 17:07:00.279454 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 17:07:00 crc kubenswrapper[4812]: I1125 17:07:00.279830 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 17:07:00 crc kubenswrapper[4812]: I1125 17:07:00.324894 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 17:07:00 crc kubenswrapper[4812]: I1125 17:07:00.352215 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 17:07:00 crc kubenswrapper[4812]: I1125 17:07:00.987803 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 17:07:01 crc kubenswrapper[4812]: I1125 17:07:01.362028 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.180:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 17:07:01 crc kubenswrapper[4812]: I1125 17:07:01.362047 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.180:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.009449 4812 generic.go:334] "Generic (PLEG): container finished" podID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerID="96b74e481278bc080f065227435d8424f93c75de3c3152f7c4c2a1dcb1fe9c1a" exitCode=137 Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.009495 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c91d38e2-27e4-4f64-a8bc-84c070c8096e","Type":"ContainerDied","Data":"96b74e481278bc080f065227435d8424f93c75de3c3152f7c4c2a1dcb1fe9c1a"} Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.010094 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c91d38e2-27e4-4f64-a8bc-84c070c8096e","Type":"ContainerDied","Data":"b545c13d1a9b8ead2f7ad02c47c24ecfcb25917c6631f2e4dae4215746adeda0"} Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.010111 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b545c13d1a9b8ead2f7ad02c47c24ecfcb25917c6631f2e4dae4215746adeda0" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.011937 4812 generic.go:334] "Generic (PLEG): container finished" podID="22e89ee4-7aad-4b7b-a315-14593ff5ddfb" containerID="3b7eb54e380529cdc8d9fa51b54e901d06dffcafa5eb6fba1c3e1efe71d8e771" exitCode=137 Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.011979 4812 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"22e89ee4-7aad-4b7b-a315-14593ff5ddfb","Type":"ContainerDied","Data":"3b7eb54e380529cdc8d9fa51b54e901d06dffcafa5eb6fba1c3e1efe71d8e771"} Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.052407 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.120965 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-combined-ca-bundle\") pod \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.121083 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-config-data\") pod \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.121152 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2vv6\" (UniqueName: \"kubernetes.io/projected/c91d38e2-27e4-4f64-a8bc-84c070c8096e-kube-api-access-t2vv6\") pod \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.121244 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91d38e2-27e4-4f64-a8bc-84c070c8096e-logs\") pod \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\" (UID: \"c91d38e2-27e4-4f64-a8bc-84c070c8096e\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.121896 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c91d38e2-27e4-4f64-a8bc-84c070c8096e-logs" (OuterVolumeSpecName: "logs") pod "c91d38e2-27e4-4f64-a8bc-84c070c8096e" (UID: "c91d38e2-27e4-4f64-a8bc-84c070c8096e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.126156 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c91d38e2-27e4-4f64-a8bc-84c070c8096e-kube-api-access-t2vv6" (OuterVolumeSpecName: "kube-api-access-t2vv6") pod "c91d38e2-27e4-4f64-a8bc-84c070c8096e" (UID: "c91d38e2-27e4-4f64-a8bc-84c070c8096e"). InnerVolumeSpecName "kube-api-access-t2vv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.148522 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c91d38e2-27e4-4f64-a8bc-84c070c8096e" (UID: "c91d38e2-27e4-4f64-a8bc-84c070c8096e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.148871 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.149703 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-config-data" (OuterVolumeSpecName: "config-data") pod "c91d38e2-27e4-4f64-a8bc-84c070c8096e" (UID: "c91d38e2-27e4-4f64-a8bc-84c070c8096e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223045 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-config-data\") pod \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223128 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzpjv\" (UniqueName: \"kubernetes.io/projected/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-kube-api-access-fzpjv\") pod \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223319 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-combined-ca-bundle\") pod \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\" (UID: \"22e89ee4-7aad-4b7b-a315-14593ff5ddfb\") " Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223748 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c91d38e2-27e4-4f64-a8bc-84c070c8096e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223765 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223776 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91d38e2-27e4-4f64-a8bc-84c070c8096e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.223784 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2vv6\" (UniqueName: \"kubernetes.io/projected/c91d38e2-27e4-4f64-a8bc-84c070c8096e-kube-api-access-t2vv6\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.225768 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-kube-api-access-fzpjv" (OuterVolumeSpecName: "kube-api-access-fzpjv") pod "22e89ee4-7aad-4b7b-a315-14593ff5ddfb" (UID: "22e89ee4-7aad-4b7b-a315-14593ff5ddfb"). InnerVolumeSpecName "kube-api-access-fzpjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.249940 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-config-data" (OuterVolumeSpecName: "config-data") pod "22e89ee4-7aad-4b7b-a315-14593ff5ddfb" (UID: "22e89ee4-7aad-4b7b-a315-14593ff5ddfb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.252271 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "22e89ee4-7aad-4b7b-a315-14593ff5ddfb" (UID: "22e89ee4-7aad-4b7b-a315-14593ff5ddfb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.325742 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.325772 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzpjv\" (UniqueName: \"kubernetes.io/projected/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-kube-api-access-fzpjv\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:08 crc kubenswrapper[4812]: I1125 17:07:08.325784 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22e89ee4-7aad-4b7b-a315-14593ff5ddfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.020847 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.021648 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"22e89ee4-7aad-4b7b-a315-14593ff5ddfb","Type":"ContainerDied","Data":"ab8c13fecd0d2dffd226173c247b5becd7073e84d35525992811ce6d6af20707"} Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.021690 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.021706 4812 scope.go:117] "RemoveContainer" containerID="3b7eb54e380529cdc8d9fa51b54e901d06dffcafa5eb6fba1c3e1efe71d8e771" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.091649 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.112898 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.120446 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.129607 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.138911 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: E1125 17:07:09.139508 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-metadata" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.139579 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-metadata" Nov 25 17:07:09 crc kubenswrapper[4812]: E1125 17:07:09.139636 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-log" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.139656 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-log" Nov 25 17:07:09 crc kubenswrapper[4812]: E1125 17:07:09.139704 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22e89ee4-7aad-4b7b-a315-14593ff5ddfb" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.139721 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="22e89ee4-7aad-4b7b-a315-14593ff5ddfb" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.140077 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="22e89ee4-7aad-4b7b-a315-14593ff5ddfb" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.140124 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-log" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.140148 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" containerName="nova-metadata-metadata" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.141457 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.143980 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.144729 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.146000 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.146218 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.147828 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.149446 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.150866 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.153965 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.162971 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.244393 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.244685 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.244792 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.244928 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-config-data\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.245043 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc 
kubenswrapper[4812]: I1125 17:07:09.245154 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.245267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5frg\" (UniqueName: \"kubernetes.io/projected/5d2e8567-1611-44ef-b206-e8c0baa4a215-kube-api-access-b5frg\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.245383 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d2e8567-1611-44ef-b206-e8c0baa4a215-logs\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.245487 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk4k4\" (UniqueName: \"kubernetes.io/projected/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-kube-api-access-tk4k4\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.245612 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347252 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347342 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5frg\" (UniqueName: \"kubernetes.io/projected/5d2e8567-1611-44ef-b206-e8c0baa4a215-kube-api-access-b5frg\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347432 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d2e8567-1611-44ef-b206-e8c0baa4a215-logs\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347476 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk4k4\" (UniqueName: \"kubernetes.io/projected/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-kube-api-access-tk4k4\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347523 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347699 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347755 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347808 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347847 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-config-data\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.347908 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.348410 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d2e8567-1611-44ef-b206-e8c0baa4a215-logs\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.351509 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.352859 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.353272 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" 
(UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.354661 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.354686 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.355863 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.356383 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-config-data\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.368573 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5frg\" (UniqueName: \"kubernetes.io/projected/5d2e8567-1611-44ef-b206-e8c0baa4a215-kube-api-access-b5frg\") pod \"nova-metadata-0\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.374051 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk4k4\" (UniqueName: \"kubernetes.io/projected/a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e-kube-api-access-tk4k4\") pod \"nova-cell1-novncproxy-0\" (UID: \"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.466059 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.475615 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.847436 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22e89ee4-7aad-4b7b-a315-14593ff5ddfb" path="/var/lib/kubelet/pods/22e89ee4-7aad-4b7b-a315-14593ff5ddfb/volumes" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.849949 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c91d38e2-27e4-4f64-a8bc-84c070c8096e" path="/var/lib/kubelet/pods/c91d38e2-27e4-4f64-a8bc-84c070c8096e/volumes" Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.918860 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 17:07:09 crc kubenswrapper[4812]: I1125 17:07:09.967958 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:10 crc kubenswrapper[4812]: I1125 17:07:10.030181 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e","Type":"ContainerStarted","Data":"1208706bd334f98ca1bfe7dc1d31b09df5a2ed67b129e62f6aff9206f610ff6e"} Nov 25 17:07:10 crc kubenswrapper[4812]: I1125 17:07:10.032099 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5d2e8567-1611-44ef-b206-e8c0baa4a215","Type":"ContainerStarted","Data":"b9830381cc13a4b4c8877533229f1bd4cbedde8b115b33181a48aa404be1b157"} Nov 25 17:07:10 crc kubenswrapper[4812]: I1125 17:07:10.284434 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 17:07:10 crc kubenswrapper[4812]: I1125 17:07:10.285089 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 17:07:10 crc kubenswrapper[4812]: I1125 17:07:10.286800 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 17:07:10 crc kubenswrapper[4812]: I1125 17:07:10.289213 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.044912 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5d2e8567-1611-44ef-b206-e8c0baa4a215","Type":"ContainerStarted","Data":"e1218403a51d5f362a36434c0f6b7587f1068d795f96ce70d56260244ad9d82e"} Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.045222 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5d2e8567-1611-44ef-b206-e8c0baa4a215","Type":"ContainerStarted","Data":"79561625244e1d4a4572615d18e7034834da0e1795cace38a9da1b2c7d692fae"} Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.046892 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a9ed1d0c-870e-4e4c-8e13-6fbd5f18559e","Type":"ContainerStarted","Data":"be621eebb07976887f3690773542271531b8e8dbf7f5fabdd0224c2ab8e71958"} Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.047205 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.054420 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.072122 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" 
podStartSLOduration=2.072102029 podStartE2EDuration="2.072102029s" podCreationTimestamp="2025-11-25 17:07:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:11.061644543 +0000 UTC m=+1205.901786648" watchObservedRunningTime="2025-11-25 17:07:11.072102029 +0000 UTC m=+1205.912244124" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.089872 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.089846973 podStartE2EDuration="2.089846973s" podCreationTimestamp="2025-11-25 17:07:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:11.081904267 +0000 UTC m=+1205.922046372" watchObservedRunningTime="2025-11-25 17:07:11.089846973 +0000 UTC m=+1205.929989068" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.268472 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-n2rck"] Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.273197 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.323103 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-n2rck"] Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.392727 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.392775 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gn7f\" (UniqueName: \"kubernetes.io/projected/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-kube-api-access-5gn7f\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.392824 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-dns-svc\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.392972 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-config\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.393008 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.494472 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.494524 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gn7f\" (UniqueName: \"kubernetes.io/projected/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-kube-api-access-5gn7f\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.494609 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-dns-svc\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.494681 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-config\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.494707 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.497184 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.497485 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-dns-svc\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.497631 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-config\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.498148 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.523821 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gn7f\" (UniqueName: 
\"kubernetes.io/projected/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-kube-api-access-5gn7f\") pod \"dnsmasq-dns-5b856c5697-n2rck\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:11 crc kubenswrapper[4812]: I1125 17:07:11.613182 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:12 crc kubenswrapper[4812]: I1125 17:07:12.064875 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-n2rck"] Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.062860 4812 generic.go:334] "Generic (PLEG): container finished" podID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerID="316f48bd193b69b47743ad7e4b733cfe55ef0acb930deb21b5121b16ff5f929e" exitCode=0 Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.062949 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" event={"ID":"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf","Type":"ContainerDied","Data":"316f48bd193b69b47743ad7e4b733cfe55ef0acb930deb21b5121b16ff5f929e"} Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.062993 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" event={"ID":"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf","Type":"ContainerStarted","Data":"de15b751f0298f5dc9a67a746b7ad5aa315ed7dbbbdfe4ea729cf69f12966d6d"} Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.410457 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.410739 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-central-agent" containerID="cri-o://cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790" gracePeriod=30 Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.410859 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-notification-agent" containerID="cri-o://3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7" gracePeriod=30 Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.410845 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="proxy-httpd" containerID="cri-o://b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc" gracePeriod=30 Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.411086 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="sg-core" containerID="cri-o://5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b" gracePeriod=30 Nov 25 17:07:13 crc kubenswrapper[4812]: E1125 17:07:13.598133 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12174231_9610_42f1_aaea_50d8aeae60a5.slice/crio-5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12174231_9610_42f1_aaea_50d8aeae60a5.slice/crio-conmon-5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12174231_9610_42f1_aaea_50d8aeae60a5.slice/crio-b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:07:13 crc kubenswrapper[4812]: I1125 17:07:13.740448 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.088993 4812 generic.go:334] "Generic (PLEG): container finished" podID="12174231-9610-42f1-aaea-50d8aeae60a5" containerID="b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc" exitCode=0 Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.089381 4812 generic.go:334] "Generic (PLEG): container finished" podID="12174231-9610-42f1-aaea-50d8aeae60a5" containerID="5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b" exitCode=2 Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.089478 4812 generic.go:334] "Generic (PLEG): container finished" podID="12174231-9610-42f1-aaea-50d8aeae60a5" containerID="cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790" exitCode=0 Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.089056 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerDied","Data":"b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc"} Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.089829 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerDied","Data":"5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b"} Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.089955 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerDied","Data":"cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790"} Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.092793 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-log" containerID="cri-o://9de63679e6f433d98a86a745a1f7493352a013f266a1e7f28aeddf3c01df43ac" gracePeriod=30 Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.094049 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" event={"ID":"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf","Type":"ContainerStarted","Data":"f2ab7e5ed96da37f8e888ff2ad167ba4f5893a9c850c188eb7257b1f248b74c7"} Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.094093 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-api" containerID="cri-o://e6ab1bbd119f9db68e36d31d567fb111091b89a8a34a5fb8172d10cc35209c61" gracePeriod=30 Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.094468 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.124553 4812 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" podStartSLOduration=3.124518346 podStartE2EDuration="3.124518346s" podCreationTimestamp="2025-11-25 17:07:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:14.122301935 +0000 UTC m=+1208.962444040" watchObservedRunningTime="2025-11-25 17:07:14.124518346 +0000 UTC m=+1208.964660441" Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.466880 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.476597 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 17:07:14 crc kubenswrapper[4812]: I1125 17:07:14.476743 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.103037 4812 generic.go:334] "Generic (PLEG): container finished" podID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerID="9de63679e6f433d98a86a745a1f7493352a013f266a1e7f28aeddf3c01df43ac" exitCode=143 Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.103174 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a","Type":"ContainerDied","Data":"9de63679e6f433d98a86a745a1f7493352a013f266a1e7f28aeddf3c01df43ac"} Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.459524 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.466972 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-config-data\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467016 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467048 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-scripts\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467093 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-ceilometer-tls-certs\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467121 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-log-httpd\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") " Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467160 4812 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfknq\" (UniqueName: \"kubernetes.io/projected/12174231-9610-42f1-aaea-50d8aeae60a5-kube-api-access-nfknq\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") "
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467185 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-sg-core-conf-yaml\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") "
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467221 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-run-httpd\") pod \"12174231-9610-42f1-aaea-50d8aeae60a5\" (UID: \"12174231-9610-42f1-aaea-50d8aeae60a5\") "
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467621 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.467695 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.468187 4812 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.468200 4812 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12174231-9610-42f1-aaea-50d8aeae60a5-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.471991 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-scripts" (OuterVolumeSpecName: "scripts") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.472980 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12174231-9610-42f1-aaea-50d8aeae60a5-kube-api-access-nfknq" (OuterVolumeSpecName: "kube-api-access-nfknq") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "kube-api-access-nfknq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.510773 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.531954 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.569848 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.569870 4812 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.569879 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfknq\" (UniqueName: \"kubernetes.io/projected/12174231-9610-42f1-aaea-50d8aeae60a5-kube-api-access-nfknq\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.569887 4812 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.576599 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.620247 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-config-data" (OuterVolumeSpecName: "config-data") pod "12174231-9610-42f1-aaea-50d8aeae60a5" (UID: "12174231-9610-42f1-aaea-50d8aeae60a5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.674985 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:15 crc kubenswrapper[4812]: I1125 17:07:15.675028 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12174231-9610-42f1-aaea-50d8aeae60a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.113001 4812 generic.go:334] "Generic (PLEG): container finished" podID="12174231-9610-42f1-aaea-50d8aeae60a5" containerID="3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7" exitCode=0
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.113039 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerDied","Data":"3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7"}
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.113074 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12174231-9610-42f1-aaea-50d8aeae60a5","Type":"ContainerDied","Data":"d78c42dc472acfec4ee9f42102cd173032d39babdb9e2002cb50b992d5014859"}
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.113090 4812 scope.go:117] "RemoveContainer" containerID="b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.113132 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.149773 4812 scope.go:117] "RemoveContainer" containerID="5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.152443 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.171638 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.178630 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.178880 4812 scope.go:117] "RemoveContainer" containerID="3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.179013 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="proxy-httpd"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179030 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="proxy-httpd"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.179053 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="sg-core"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179059 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="sg-core"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.179082 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-central-agent"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179089 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-central-agent"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.179095 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-notification-agent"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179101 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-notification-agent"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179277 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-central-agent"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179291 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="proxy-httpd"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179719 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="sg-core"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.179737 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" containerName="ceilometer-notification-agent"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.185837 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.188753 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.190579 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.190688 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.196157 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.208727 4812 scope.go:117] "RemoveContainer" containerID="cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.231562 4812 scope.go:117] "RemoveContainer" containerID="b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.232696 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc\": container with ID starting with b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc not found: ID does not exist" containerID="b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.232734 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc"} err="failed to get container status \"b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc\": rpc error: code = NotFound desc = could not find container \"b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc\": container with ID starting with b93ae69aca4442271c7b7728df6c6a787f2f65a0d5778e60674692f037294dfc not found: ID does not exist"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.232762 4812 scope.go:117] "RemoveContainer" containerID="5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.233305 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b\": container with ID starting with 5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b not found: ID does not exist" containerID="5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.233363 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b"} err="failed to get container status \"5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b\": rpc error: code = NotFound desc = could not find container \"5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b\": container with ID starting with 5224c0e6f79cf66d76af4c830e437262bd9ccb96243564de4ae3b05675a4cb5b not found: ID does not exist"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.233396 4812 scope.go:117] "RemoveContainer" containerID="3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.233978 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7\": container with ID starting with 3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7 not found: ID does not exist" containerID="3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.234008 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7"} err="failed to get container status \"3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7\": rpc error: code = NotFound desc = could not find container \"3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7\": container with ID starting with 3174e048735fb18659b69e9f57f90a70901161cc4133fc5150797ff5079f97f7 not found: ID does not exist"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.234024 4812 scope.go:117] "RemoveContainer" containerID="cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790"
Nov 25 17:07:16 crc kubenswrapper[4812]: E1125 17:07:16.234690 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790\": container with ID starting with cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790 not found: ID does not exist" containerID="cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.234937 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790"} err="failed to get container status \"cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790\": rpc error: code = NotFound desc = could not find container \"cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790\": container with ID starting with cb1a3d783bf22e462aa06323ea8dee14932a14e0b7e9de16462aefd3bff05790 not found: ID does not exist"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287234 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-log-httpd\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287287 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-run-httpd\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287310 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-scripts\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287342 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287366 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287443 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287459 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2z26\" (UniqueName: \"kubernetes.io/projected/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-kube-api-access-z2z26\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.287482 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-config-data\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.389665 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.389995 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390086 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390104 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2z26\" (UniqueName: \"kubernetes.io/projected/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-kube-api-access-z2z26\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390133 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-config-data\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390174 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-log-httpd\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390204 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-run-httpd\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390225 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-scripts\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.390725 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-log-httpd\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.391072 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-run-httpd\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.398383 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.398508 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.398704 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-scripts\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.398963 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.399261 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-config-data\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.405830 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2z26\" (UniqueName: \"kubernetes.io/projected/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-kube-api-access-z2z26\") pod \"ceilometer-0\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") " pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.502066 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:07:16 crc kubenswrapper[4812]: I1125 17:07:16.942988 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:07:17 crc kubenswrapper[4812]: I1125 17:07:17.123885 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerStarted","Data":"9604ae0888ec7729826b5c679fb9c8c16a8c3234b13de2edca5f577e45fb868f"}
Nov 25 17:07:17 crc kubenswrapper[4812]: I1125 17:07:17.841624 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12174231-9610-42f1-aaea-50d8aeae60a5" path="/var/lib/kubelet/pods/12174231-9610-42f1-aaea-50d8aeae60a5/volumes"
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.143050 4812 generic.go:334] "Generic (PLEG): container finished" podID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerID="e6ab1bbd119f9db68e36d31d567fb111091b89a8a34a5fb8172d10cc35209c61" exitCode=0
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.143617 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a","Type":"ContainerDied","Data":"e6ab1bbd119f9db68e36d31d567fb111091b89a8a34a5fb8172d10cc35209c61"}
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.332168 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.424341 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z55rm\" (UniqueName: \"kubernetes.io/projected/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-kube-api-access-z55rm\") pod \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") "
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.424559 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-combined-ca-bundle\") pod \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") "
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.424615 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-logs\") pod \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") "
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.424729 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-config-data\") pod \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\" (UID: \"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a\") "
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.425195 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-logs" (OuterVolumeSpecName: "logs") pod "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" (UID: "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.430422 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-kube-api-access-z55rm" (OuterVolumeSpecName: "kube-api-access-z55rm") pod "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" (UID: "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a"). InnerVolumeSpecName "kube-api-access-z55rm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.451165 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" (UID: "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.455668 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-config-data" (OuterVolumeSpecName: "config-data") pod "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" (UID: "4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.527367 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.527405 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-logs\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.527416 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:18 crc kubenswrapper[4812]: I1125 17:07:18.527425 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z55rm\" (UniqueName: \"kubernetes.io/projected/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a-kube-api-access-z55rm\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.157241 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerStarted","Data":"5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d"}
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.157568 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerStarted","Data":"28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4"}
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.160218 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a","Type":"ContainerDied","Data":"5499cc3423673af1eded558035ca49dcf3f0a36979f81216f23471ac33fd05a3"}
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.160276 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.160282 4812 scope.go:117] "RemoveContainer" containerID="e6ab1bbd119f9db68e36d31d567fb111091b89a8a34a5fb8172d10cc35209c61"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.185479 4812 scope.go:117] "RemoveContainer" containerID="9de63679e6f433d98a86a745a1f7493352a013f266a1e7f28aeddf3c01df43ac"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.196951 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.208288 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.223604 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:07:19 crc kubenswrapper[4812]: E1125 17:07:19.224071 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-log"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.224087 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-log"
Nov 25 17:07:19 crc kubenswrapper[4812]: E1125 17:07:19.224116 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-api"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.224125 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-api"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.224361 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-log"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.224387 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" containerName="nova-api-api"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.225578 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.228203 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.229873 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.231079 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.235228 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.241038 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.241081 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.241116 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tq2z\" (UniqueName: \"kubernetes.io/projected/d6340ade-dfc6-4e36-b86a-7315570c1b8e-kube-api-access-9tq2z\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.241150 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6340ade-dfc6-4e36-b86a-7315570c1b8e-logs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.241229 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-config-data\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.241242 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-public-tls-certs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342344 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6340ade-dfc6-4e36-b86a-7315570c1b8e-logs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342466 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-config-data\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342486 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-public-tls-certs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342576 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342612 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342655 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tq2z\" (UniqueName: \"kubernetes.io/projected/d6340ade-dfc6-4e36-b86a-7315570c1b8e-kube-api-access-9tq2z\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.342862 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6340ade-dfc6-4e36-b86a-7315570c1b8e-logs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.350978 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.351354 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.351780 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-public-tls-certs\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.359328 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-config-data\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.378187 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tq2z\" (UniqueName: \"kubernetes.io/projected/d6340ade-dfc6-4e36-b86a-7315570c1b8e-kube-api-access-9tq2z\") pod \"nova-api-0\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.466506 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.475768 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.475825 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.491621 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.545247 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.862149 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a" path="/var/lib/kubelet/pods/4ac60428-2fe2-4b32-8eb4-1ede2cad4e5a/volumes"
Nov 25 17:07:19 crc kubenswrapper[4812]: I1125 17:07:19.990573 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.171632 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerStarted","Data":"f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e"}
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.186863 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6340ade-dfc6-4e36-b86a-7315570c1b8e","Type":"ContainerStarted","Data":"026b23d957d98cdb328de886a41cb0d6c4a19b80abd82e9997aa2cd2b301f127"}
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.211921 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.424065 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-pwkn8"]
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.432020 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.435624 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.435840 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.446228 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-pwkn8"]
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.470786 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t87k2\" (UniqueName: \"kubernetes.io/projected/c6b3ec00-4b31-4c04-b90a-58d161c57811-kube-api-access-t87k2\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.470863 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-scripts\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.470938 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-config-data\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.471019 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.488812 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.183:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.488822 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.183:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.572708 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-scripts\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.573388 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-config-data\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.573592 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.573757 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t87k2\" (UniqueName: \"kubernetes.io/projected/c6b3ec00-4b31-4c04-b90a-58d161c57811-kube-api-access-t87k2\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.578287 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-config-data\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.578500 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-scripts\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.578806 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.605155 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t87k2\" (UniqueName: \"kubernetes.io/projected/c6b3ec00-4b31-4c04-b90a-58d161c57811-kube-api-access-t87k2\") pod \"nova-cell1-cell-mapping-pwkn8\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") " pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:20 crc kubenswrapper[4812]: I1125 17:07:20.748343 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.198231 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerStarted","Data":"61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066"}
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.198498 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.206003 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6340ade-dfc6-4e36-b86a-7315570c1b8e","Type":"ContainerStarted","Data":"321aa0528db5fa4549ed68126f193d3dda8d5113d8fcdf428cab7d8f2e976ee5"}
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.206053 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6340ade-dfc6-4e36-b86a-7315570c1b8e","Type":"ContainerStarted","Data":"5db3d216cbf1c906c58903624c96f44cb420ddb6986432ff1c1f8f60b2d64fef"}
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.241648 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.2828242730000001 podStartE2EDuration="5.241634148s" podCreationTimestamp="2025-11-25 17:07:16 +0000 UTC" firstStartedPulling="2025-11-25 17:07:16.957358661 +0000 UTC m=+1211.797500746" lastFinishedPulling="2025-11-25 17:07:20.916168526 +0000 UTC m=+1215.756310621" observedRunningTime="2025-11-25 17:07:21.241194197 +0000 UTC m=+1216.081336282" watchObservedRunningTime="2025-11-25 17:07:21.241634148 +0000 UTC m=+1216.081776243"
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.281267 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.281247449 podStartE2EDuration="2.281247449s" podCreationTimestamp="2025-11-25 17:07:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:21.27797277 +0000 UTC m=+1216.118114865" watchObservedRunningTime="2025-11-25 17:07:21.281247449 +0000 UTC m=+1216.121389544"
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.297224 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-pwkn8"]
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.615486 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b856c5697-n2rck"
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.687577 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-z2wbp"]
Nov 25 17:07:21 crc kubenswrapper[4812]: I1125 17:07:21.687814 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerName="dnsmasq-dns" containerID="cri-o://0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5" gracePeriod=10
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.213021 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.215725 4812 generic.go:334] "Generic (PLEG): container finished" podID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerID="0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5" exitCode=0
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.215801 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" event={"ID":"e418273d-c607-491b-aaa2-d30ee1cd1fb0","Type":"ContainerDied","Data":"0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5"}
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.215828 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp" event={"ID":"e418273d-c607-491b-aaa2-d30ee1cd1fb0","Type":"ContainerDied","Data":"c6fee354d96d2627053a1602ca8241d7fb9809895ca452de75f3f2189eca7017"}
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.215846 4812 scope.go:117] "RemoveContainer" containerID="0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.216016 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-z2wbp"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.218437 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pwkn8" event={"ID":"c6b3ec00-4b31-4c04-b90a-58d161c57811","Type":"ContainerStarted","Data":"3e33fcdfbd7ac14e9fb59ced95c1013e9a8947b1f09d4f25e7c2ba22fd21cb38"}
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.218499 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pwkn8" event={"ID":"c6b3ec00-4b31-4c04-b90a-58d161c57811","Type":"ContainerStarted","Data":"2f6b0f525542aac6a3bf4e661775ddfc2e91e5c836cb62a4aa8ac105a511f281"}
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.244108 4812 scope.go:117] "RemoveContainer" containerID="2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.265775 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-pwkn8" podStartSLOduration=2.265759878 podStartE2EDuration="2.265759878s" podCreationTimestamp="2025-11-25 17:07:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:22.261362099 +0000 UTC m=+1217.101504194" watchObservedRunningTime="2025-11-25 17:07:22.265759878 +0000 UTC m=+1217.105901983"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.275747 4812 scope.go:117] "RemoveContainer" containerID="0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5"
Nov 25 17:07:22 crc kubenswrapper[4812]: E1125 17:07:22.279707 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5\": container with ID starting with 0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5 not found: ID does not exist" containerID="0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.279758 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5"} err="failed to get container status \"0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5\": rpc error: code = NotFound desc = could not find container \"0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5\": container with ID starting with 0a0644fcecec06e1b5cec9a822554d9a25b6905719516cad894f6c65765609c5 not found: ID does not exist"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.279787 4812 scope.go:117] "RemoveContainer" containerID="2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0"
Nov 25 17:07:22 crc kubenswrapper[4812]: E1125 17:07:22.281657 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0\": container with ID starting with 2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0 not found: ID does not exist" containerID="2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.281715 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0"} err="failed to get container status \"2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0\": rpc error: code = NotFound desc = could not find container \"2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0\": container with ID starting with 2d56116553003661e79e75091f6bd53ad5a3e4915b9d052596ccca45d49155f0 not found: ID does not exist"
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.410595 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-config\") pod \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") "
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.410705 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpczk\" (UniqueName: \"kubernetes.io/projected/e418273d-c607-491b-aaa2-d30ee1cd1fb0-kube-api-access-kpczk\") pod \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") "
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.410923 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-nb\") pod \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") "
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.410959 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-dns-svc\") pod \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") "
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.411105 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-sb\") pod \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\" (UID: \"e418273d-c607-491b-aaa2-d30ee1cd1fb0\") "
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.418577 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e418273d-c607-491b-aaa2-d30ee1cd1fb0-kube-api-access-kpczk" (OuterVolumeSpecName: "kube-api-access-kpczk") pod "e418273d-c607-491b-aaa2-d30ee1cd1fb0" (UID: "e418273d-c607-491b-aaa2-d30ee1cd1fb0"). InnerVolumeSpecName "kube-api-access-kpczk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.475881 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e418273d-c607-491b-aaa2-d30ee1cd1fb0" (UID: "e418273d-c607-491b-aaa2-d30ee1cd1fb0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.478019 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-config" (OuterVolumeSpecName: "config") pod "e418273d-c607-491b-aaa2-d30ee1cd1fb0" (UID: "e418273d-c607-491b-aaa2-d30ee1cd1fb0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.485555 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e418273d-c607-491b-aaa2-d30ee1cd1fb0" (UID: "e418273d-c607-491b-aaa2-d30ee1cd1fb0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.491070 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e418273d-c607-491b-aaa2-d30ee1cd1fb0" (UID: "e418273d-c607-491b-aaa2-d30ee1cd1fb0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.513488 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.513544 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpczk\" (UniqueName: \"kubernetes.io/projected/e418273d-c607-491b-aaa2-d30ee1cd1fb0-kube-api-access-kpczk\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.513559 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.513575 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.513588 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e418273d-c607-491b-aaa2-d30ee1cd1fb0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.578748 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-z2wbp"]
Nov 25 17:07:22 crc kubenswrapper[4812]: I1125 17:07:22.585927 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-z2wbp"]
Nov 25 17:07:23 crc kubenswrapper[4812]: I1125 17:07:23.843903 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" path="/var/lib/kubelet/pods/e418273d-c607-491b-aaa2-d30ee1cd1fb0/volumes"
Nov 25 17:07:26 crc kubenswrapper[4812]: I1125 17:07:26.252735 4812 generic.go:334] "Generic (PLEG): container finished" podID="c6b3ec00-4b31-4c04-b90a-58d161c57811" containerID="3e33fcdfbd7ac14e9fb59ced95c1013e9a8947b1f09d4f25e7c2ba22fd21cb38" exitCode=0
Nov 25 17:07:26 crc kubenswrapper[4812]: I1125 17:07:26.252937 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pwkn8" event={"ID":"c6b3ec00-4b31-4c04-b90a-58d161c57811","Type":"ContainerDied","Data":"3e33fcdfbd7ac14e9fb59ced95c1013e9a8947b1f09d4f25e7c2ba22fd21cb38"}
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.332870 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.333435 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.615570 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.709030 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-config-data\") pod \"c6b3ec00-4b31-4c04-b90a-58d161c57811\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") "
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.709177 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-combined-ca-bundle\") pod \"c6b3ec00-4b31-4c04-b90a-58d161c57811\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") "
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.709222 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t87k2\" (UniqueName: \"kubernetes.io/projected/c6b3ec00-4b31-4c04-b90a-58d161c57811-kube-api-access-t87k2\") pod \"c6b3ec00-4b31-4c04-b90a-58d161c57811\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") "
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.710540 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-scripts\") pod \"c6b3ec00-4b31-4c04-b90a-58d161c57811\" (UID: \"c6b3ec00-4b31-4c04-b90a-58d161c57811\") "
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.722093 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6b3ec00-4b31-4c04-b90a-58d161c57811-kube-api-access-t87k2" (OuterVolumeSpecName: "kube-api-access-t87k2") pod "c6b3ec00-4b31-4c04-b90a-58d161c57811" (UID: "c6b3ec00-4b31-4c04-b90a-58d161c57811"). InnerVolumeSpecName "kube-api-access-t87k2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.723423 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-scripts" (OuterVolumeSpecName: "scripts") pod "c6b3ec00-4b31-4c04-b90a-58d161c57811" (UID: "c6b3ec00-4b31-4c04-b90a-58d161c57811"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.748653 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-config-data" (OuterVolumeSpecName: "config-data") pod "c6b3ec00-4b31-4c04-b90a-58d161c57811" (UID: "c6b3ec00-4b31-4c04-b90a-58d161c57811"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.763468 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6b3ec00-4b31-4c04-b90a-58d161c57811" (UID: "c6b3ec00-4b31-4c04-b90a-58d161c57811"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.812845 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.812891 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.812909 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t87k2\" (UniqueName: \"kubernetes.io/projected/c6b3ec00-4b31-4c04-b90a-58d161c57811-kube-api-access-t87k2\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:27 crc kubenswrapper[4812]: I1125 17:07:27.812918 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6b3ec00-4b31-4c04-b90a-58d161c57811-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.278808 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-pwkn8" event={"ID":"c6b3ec00-4b31-4c04-b90a-58d161c57811","Type":"ContainerDied","Data":"2f6b0f525542aac6a3bf4e661775ddfc2e91e5c836cb62a4aa8ac105a511f281"}
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.278844 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f6b0f525542aac6a3bf4e661775ddfc2e91e5c836cb62a4aa8ac105a511f281"
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.278882 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-pwkn8"
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.844460 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.844822 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-log" containerID="cri-o://5db3d216cbf1c906c58903624c96f44cb420ddb6986432ff1c1f8f60b2d64fef" gracePeriod=30
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.845353 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-api" containerID="cri-o://321aa0528db5fa4549ed68126f193d3dda8d5113d8fcdf428cab7d8f2e976ee5" gracePeriod=30
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.856990 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.860316 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="8528f1d6-993c-4104-aa01-a67ed54fc82b" containerName="nova-scheduler-scheduler" containerID="cri-o://628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" gracePeriod=30
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.903787 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.904353 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215"
containerName="nova-metadata-log" containerID="cri-o://79561625244e1d4a4572615d18e7034834da0e1795cace38a9da1b2c7d692fae" gracePeriod=30 Nov 25 17:07:28 crc kubenswrapper[4812]: I1125 17:07:28.904473 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-metadata" containerID="cri-o://e1218403a51d5f362a36434c0f6b7587f1068d795f96ce70d56260244ad9d82e" gracePeriod=30 Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.293288 4812 generic.go:334] "Generic (PLEG): container finished" podID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerID="321aa0528db5fa4549ed68126f193d3dda8d5113d8fcdf428cab7d8f2e976ee5" exitCode=0 Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.293321 4812 generic.go:334] "Generic (PLEG): container finished" podID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerID="5db3d216cbf1c906c58903624c96f44cb420ddb6986432ff1c1f8f60b2d64fef" exitCode=143 Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.293382 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6340ade-dfc6-4e36-b86a-7315570c1b8e","Type":"ContainerDied","Data":"321aa0528db5fa4549ed68126f193d3dda8d5113d8fcdf428cab7d8f2e976ee5"} Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.293443 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6340ade-dfc6-4e36-b86a-7315570c1b8e","Type":"ContainerDied","Data":"5db3d216cbf1c906c58903624c96f44cb420ddb6986432ff1c1f8f60b2d64fef"} Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.295415 4812 generic.go:334] "Generic (PLEG): container finished" podID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerID="79561625244e1d4a4572615d18e7034834da0e1795cace38a9da1b2c7d692fae" exitCode=143 Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.295441 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5d2e8567-1611-44ef-b206-e8c0baa4a215","Type":"ContainerDied","Data":"79561625244e1d4a4572615d18e7034834da0e1795cace38a9da1b2c7d692fae"} Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.407056 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.544102 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tq2z\" (UniqueName: \"kubernetes.io/projected/d6340ade-dfc6-4e36-b86a-7315570c1b8e-kube-api-access-9tq2z\") pod \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.544153 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-public-tls-certs\") pod \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.544279 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-config-data\") pod \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.544457 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-combined-ca-bundle\") pod \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.545259 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-internal-tls-certs\") pod \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.545757 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6340ade-dfc6-4e36-b86a-7315570c1b8e-logs" (OuterVolumeSpecName: "logs") pod "d6340ade-dfc6-4e36-b86a-7315570c1b8e" (UID: "d6340ade-dfc6-4e36-b86a-7315570c1b8e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.545918 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6340ade-dfc6-4e36-b86a-7315570c1b8e-logs\") pod \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\" (UID: \"d6340ade-dfc6-4e36-b86a-7315570c1b8e\") " Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.546788 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6340ade-dfc6-4e36-b86a-7315570c1b8e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.552065 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6340ade-dfc6-4e36-b86a-7315570c1b8e-kube-api-access-9tq2z" (OuterVolumeSpecName: "kube-api-access-9tq2z") pod "d6340ade-dfc6-4e36-b86a-7315570c1b8e" (UID: "d6340ade-dfc6-4e36-b86a-7315570c1b8e"). InnerVolumeSpecName "kube-api-access-9tq2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.578413 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-config-data" (OuterVolumeSpecName: "config-data") pod "d6340ade-dfc6-4e36-b86a-7315570c1b8e" (UID: "d6340ade-dfc6-4e36-b86a-7315570c1b8e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.578450 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6340ade-dfc6-4e36-b86a-7315570c1b8e" (UID: "d6340ade-dfc6-4e36-b86a-7315570c1b8e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.597514 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d6340ade-dfc6-4e36-b86a-7315570c1b8e" (UID: "d6340ade-dfc6-4e36-b86a-7315570c1b8e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.611878 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d6340ade-dfc6-4e36-b86a-7315570c1b8e" (UID: "d6340ade-dfc6-4e36-b86a-7315570c1b8e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.648362 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.648395 4812 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.648405 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tq2z\" (UniqueName: \"kubernetes.io/projected/d6340ade-dfc6-4e36-b86a-7315570c1b8e-kube-api-access-9tq2z\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.648416 4812 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:29 crc kubenswrapper[4812]: I1125 17:07:29.648424 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6340ade-dfc6-4e36-b86a-7315570c1b8e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.309657 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6340ade-dfc6-4e36-b86a-7315570c1b8e","Type":"ContainerDied","Data":"026b23d957d98cdb328de886a41cb0d6c4a19b80abd82e9997aa2cd2b301f127"} Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.309747 4812 scope.go:117] 
"RemoveContainer" containerID="321aa0528db5fa4549ed68126f193d3dda8d5113d8fcdf428cab7d8f2e976ee5" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.310278 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.327241 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.331827 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.334418 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.334479 4812 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="8528f1d6-993c-4104-aa01-a67ed54fc82b" containerName="nova-scheduler-scheduler" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.335036 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.352998 4812 scope.go:117] "RemoveContainer" containerID="5db3d216cbf1c906c58903624c96f44cb420ddb6986432ff1c1f8f60b2d64fef" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.377015 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.387212 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.387893 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerName="dnsmasq-dns" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.387915 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerName="dnsmasq-dns" Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.387950 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerName="init" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.387957 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerName="init" Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.387980 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-log" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.387986 4812 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-log" Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.387997 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-api" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.388023 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-api" Nov 25 17:07:30 crc kubenswrapper[4812]: E1125 17:07:30.388034 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6b3ec00-4b31-4c04-b90a-58d161c57811" containerName="nova-manage" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.388041 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6b3ec00-4b31-4c04-b90a-58d161c57811" containerName="nova-manage" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.388246 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e418273d-c607-491b-aaa2-d30ee1cd1fb0" containerName="dnsmasq-dns" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.388263 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6b3ec00-4b31-4c04-b90a-58d161c57811" containerName="nova-manage" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.388276 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-log" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.388295 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" containerName="nova-api-api" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.389474 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.392514 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.392858 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.393076 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.397386 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.574359 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-public-tls-certs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.574405 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5th8b\" (UniqueName: \"kubernetes.io/projected/9ee574ba-7f62-44c5-a6a7-c1a62704638c-kube-api-access-5th8b\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.574455 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.574493 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ee574ba-7f62-44c5-a6a7-c1a62704638c-logs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.574584 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-config-data\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.574661 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.676156 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.676227 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ee574ba-7f62-44c5-a6a7-c1a62704638c-logs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.676257 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-config-data\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.676688 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ee574ba-7f62-44c5-a6a7-c1a62704638c-logs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.676771 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.677257 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-public-tls-certs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.677379 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5th8b\" (UniqueName: \"kubernetes.io/projected/9ee574ba-7f62-44c5-a6a7-c1a62704638c-kube-api-access-5th8b\") pod \"nova-api-0\" (UID: 
\"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.680996 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-config-data\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.681196 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.681334 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-public-tls-certs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.681367 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9ee574ba-7f62-44c5-a6a7-c1a62704638c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.696347 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5th8b\" (UniqueName: \"kubernetes.io/projected/9ee574ba-7f62-44c5-a6a7-c1a62704638c-kube-api-access-5th8b\") pod \"nova-api-0\" (UID: \"9ee574ba-7f62-44c5-a6a7-c1a62704638c\") " pod="openstack/nova-api-0" Nov 25 17:07:30 crc kubenswrapper[4812]: I1125 17:07:30.714635 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 17:07:31 crc kubenswrapper[4812]: I1125 17:07:31.176941 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 17:07:31 crc kubenswrapper[4812]: W1125 17:07:31.180588 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ee574ba_7f62_44c5_a6a7_c1a62704638c.slice/crio-5c4fc94e0af3b3bf2da87d11cac24bed5ee9e58f0b6a0861a5bb62778b0445f9 WatchSource:0}: Error finding container 5c4fc94e0af3b3bf2da87d11cac24bed5ee9e58f0b6a0861a5bb62778b0445f9: Status 404 returned error can't find the container with id 5c4fc94e0af3b3bf2da87d11cac24bed5ee9e58f0b6a0861a5bb62778b0445f9 Nov 25 17:07:31 crc kubenswrapper[4812]: I1125 17:07:31.321016 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9ee574ba-7f62-44c5-a6a7-c1a62704638c","Type":"ContainerStarted","Data":"5c4fc94e0af3b3bf2da87d11cac24bed5ee9e58f0b6a0861a5bb62778b0445f9"} Nov 25 17:07:31 crc kubenswrapper[4812]: I1125 17:07:31.842255 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6340ade-dfc6-4e36-b86a-7315570c1b8e" path="/var/lib/kubelet/pods/d6340ade-dfc6-4e36-b86a-7315570c1b8e/volumes" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.331712 4812 generic.go:334] "Generic (PLEG): container finished" podID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerID="e1218403a51d5f362a36434c0f6b7587f1068d795f96ce70d56260244ad9d82e" exitCode=0 Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.331777 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5d2e8567-1611-44ef-b206-e8c0baa4a215","Type":"ContainerDied","Data":"e1218403a51d5f362a36434c0f6b7587f1068d795f96ce70d56260244ad9d82e"} Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.334952 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9ee574ba-7f62-44c5-a6a7-c1a62704638c","Type":"ContainerStarted","Data":"b59386be5679aa46075aac4009ddc36a25ffbd6aeb8c219ace544f543430dee4"} Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.334994 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"9ee574ba-7f62-44c5-a6a7-c1a62704638c","Type":"ContainerStarted","Data":"77059f23f2655399c37da4d18af228fdc2dbb5dc90f8702a0dc92afd1d1763f8"} Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.351063 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.351039238 podStartE2EDuration="2.351039238s" podCreationTimestamp="2025-11-25 17:07:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:32.349867457 +0000 UTC m=+1227.190009552" watchObservedRunningTime="2025-11-25 17:07:32.351039238 +0000 UTC m=+1227.191181353" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.458106 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.611141 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d2e8567-1611-44ef-b206-e8c0baa4a215-logs\") pod \"5d2e8567-1611-44ef-b206-e8c0baa4a215\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.611205 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5frg\" (UniqueName: \"kubernetes.io/projected/5d2e8567-1611-44ef-b206-e8c0baa4a215-kube-api-access-b5frg\") pod \"5d2e8567-1611-44ef-b206-e8c0baa4a215\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.611239 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-combined-ca-bundle\") pod \"5d2e8567-1611-44ef-b206-e8c0baa4a215\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.611296 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-config-data\") pod \"5d2e8567-1611-44ef-b206-e8c0baa4a215\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.611322 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-nova-metadata-tls-certs\") pod \"5d2e8567-1611-44ef-b206-e8c0baa4a215\" (UID: \"5d2e8567-1611-44ef-b206-e8c0baa4a215\") " Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.612027 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d2e8567-1611-44ef-b206-e8c0baa4a215-logs" (OuterVolumeSpecName: "logs") pod "5d2e8567-1611-44ef-b206-e8c0baa4a215" (UID: "5d2e8567-1611-44ef-b206-e8c0baa4a215"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.616910 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d2e8567-1611-44ef-b206-e8c0baa4a215-kube-api-access-b5frg" (OuterVolumeSpecName: "kube-api-access-b5frg") pod "5d2e8567-1611-44ef-b206-e8c0baa4a215" (UID: "5d2e8567-1611-44ef-b206-e8c0baa4a215"). InnerVolumeSpecName "kube-api-access-b5frg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.635348 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-config-data" (OuterVolumeSpecName: "config-data") pod "5d2e8567-1611-44ef-b206-e8c0baa4a215" (UID: "5d2e8567-1611-44ef-b206-e8c0baa4a215"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.636232 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d2e8567-1611-44ef-b206-e8c0baa4a215" (UID: "5d2e8567-1611-44ef-b206-e8c0baa4a215"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.660558 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "5d2e8567-1611-44ef-b206-e8c0baa4a215" (UID: "5d2e8567-1611-44ef-b206-e8c0baa4a215"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.713381 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.713567 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.713669 4812 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2e8567-1611-44ef-b206-e8c0baa4a215-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.713772 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d2e8567-1611-44ef-b206-e8c0baa4a215-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:32 crc kubenswrapper[4812]: I1125 17:07:32.713874 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5frg\" (UniqueName: \"kubernetes.io/projected/5d2e8567-1611-44ef-b206-e8c0baa4a215-kube-api-access-b5frg\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.344896 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.344887 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5d2e8567-1611-44ef-b206-e8c0baa4a215","Type":"ContainerDied","Data":"b9830381cc13a4b4c8877533229f1bd4cbedde8b115b33181a48aa404be1b157"} Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.345433 4812 scope.go:117] "RemoveContainer" containerID="e1218403a51d5f362a36434c0f6b7587f1068d795f96ce70d56260244ad9d82e" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.365579 4812 scope.go:117] "RemoveContainer" containerID="79561625244e1d4a4572615d18e7034834da0e1795cace38a9da1b2c7d692fae" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.376307 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.387118 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.398287 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:33 crc kubenswrapper[4812]: E1125 17:07:33.399042 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-metadata" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.399071 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-metadata" Nov 25 17:07:33 crc kubenswrapper[4812]: E1125 17:07:33.399100 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-log" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.399113 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-log" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.399422 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-metadata" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.399460 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" containerName="nova-metadata-log" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.400608 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.403124 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.403197 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.408798 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.425640 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75vlh\" (UniqueName: \"kubernetes.io/projected/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-kube-api-access-75vlh\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.425751 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.425847 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.425901 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-logs\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.426039 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-config-data\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.527199 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.527294 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.527337 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-logs\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " 
pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.527409 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-config-data\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.527459 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75vlh\" (UniqueName: \"kubernetes.io/projected/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-kube-api-access-75vlh\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.528020 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-logs\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.531071 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.531546 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-config-data\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.537703 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.547593 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75vlh\" (UniqueName: \"kubernetes.io/projected/9626e4b9-4c10-4d8f-962f-03dbe9bfea89-kube-api-access-75vlh\") pod \"nova-metadata-0\" (UID: \"9626e4b9-4c10-4d8f-962f-03dbe9bfea89\") " pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.724827 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 17:07:33 crc kubenswrapper[4812]: I1125 17:07:33.845676 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d2e8567-1611-44ef-b206-e8c0baa4a215" path="/var/lib/kubelet/pods/5d2e8567-1611-44ef-b206-e8c0baa4a215/volumes" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.167582 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 17:07:34 crc kubenswrapper[4812]: W1125 17:07:34.172607 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9626e4b9_4c10_4d8f_962f_03dbe9bfea89.slice/crio-61ebf63a7a8e6741554491796ae24c8747ce7b948666dacc8a65ce652a631d81 WatchSource:0}: Error finding container 61ebf63a7a8e6741554491796ae24c8747ce7b948666dacc8a65ce652a631d81: Status 404 returned error can't find the container with id 61ebf63a7a8e6741554491796ae24c8747ce7b948666dacc8a65ce652a631d81 Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.184040 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.241507 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-config-data\") pod \"8528f1d6-993c-4104-aa01-a67ed54fc82b\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.241870 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmpln\" (UniqueName: \"kubernetes.io/projected/8528f1d6-993c-4104-aa01-a67ed54fc82b-kube-api-access-mmpln\") pod \"8528f1d6-993c-4104-aa01-a67ed54fc82b\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.241898 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-combined-ca-bundle\") pod \"8528f1d6-993c-4104-aa01-a67ed54fc82b\" (UID: \"8528f1d6-993c-4104-aa01-a67ed54fc82b\") " Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.247131 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8528f1d6-993c-4104-aa01-a67ed54fc82b-kube-api-access-mmpln" (OuterVolumeSpecName: "kube-api-access-mmpln") pod "8528f1d6-993c-4104-aa01-a67ed54fc82b" (UID: "8528f1d6-993c-4104-aa01-a67ed54fc82b"). InnerVolumeSpecName "kube-api-access-mmpln". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.275835 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-config-data" (OuterVolumeSpecName: "config-data") pod "8528f1d6-993c-4104-aa01-a67ed54fc82b" (UID: "8528f1d6-993c-4104-aa01-a67ed54fc82b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.281666 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8528f1d6-993c-4104-aa01-a67ed54fc82b" (UID: "8528f1d6-993c-4104-aa01-a67ed54fc82b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.344095 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.344199 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmpln\" (UniqueName: \"kubernetes.io/projected/8528f1d6-993c-4104-aa01-a67ed54fc82b-kube-api-access-mmpln\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.344214 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8528f1d6-993c-4104-aa01-a67ed54fc82b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.367635 4812 generic.go:334] "Generic (PLEG): container finished" podID="8528f1d6-993c-4104-aa01-a67ed54fc82b" containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" exitCode=0 Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.367707 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.367728 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8528f1d6-993c-4104-aa01-a67ed54fc82b","Type":"ContainerDied","Data":"628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c"} Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.367753 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8528f1d6-993c-4104-aa01-a67ed54fc82b","Type":"ContainerDied","Data":"e599bd0dc215fe8c655c1dd57cf271e7f03053f0eb7e5ac3fe852835efeccce4"} Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.367769 4812 scope.go:117] "RemoveContainer" containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.371554 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9626e4b9-4c10-4d8f-962f-03dbe9bfea89","Type":"ContainerStarted","Data":"055e9c94710563a57f626cc3b51bd6e2e49cf3339b6155a894533995de3f9ea0"} Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.371594 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9626e4b9-4c10-4d8f-962f-03dbe9bfea89","Type":"ContainerStarted","Data":"61ebf63a7a8e6741554491796ae24c8747ce7b948666dacc8a65ce652a631d81"} Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.403390 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.408132 4812 scope.go:117] "RemoveContainer" containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.410350 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:07:34 crc kubenswrapper[4812]: E1125 17:07:34.410930 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c\": container with ID starting with 628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c not found: ID does not exist" 
containerID="628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.410973 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c"} err="failed to get container status \"628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c\": rpc error: code = NotFound desc = could not find container \"628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c\": container with ID starting with 628353db558324cf8d470abd5746d894943d5cc71b4723d413d7caaeb963bc3c not found: ID does not exist" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.425921 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:07:34 crc kubenswrapper[4812]: E1125 17:07:34.426409 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8528f1d6-993c-4104-aa01-a67ed54fc82b" containerName="nova-scheduler-scheduler" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.426426 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8528f1d6-993c-4104-aa01-a67ed54fc82b" containerName="nova-scheduler-scheduler" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.426710 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8528f1d6-993c-4104-aa01-a67ed54fc82b" containerName="nova-scheduler-scheduler" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.427362 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.429910 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.440441 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.447781 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105c4d96-d73f-4b98-b37b-450ddb399152-config-data\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.447855 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7hpk\" (UniqueName: \"kubernetes.io/projected/105c4d96-d73f-4b98-b37b-450ddb399152-kube-api-access-d7hpk\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.447946 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105c4d96-d73f-4b98-b37b-450ddb399152-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.550079 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7hpk\" (UniqueName: \"kubernetes.io/projected/105c4d96-d73f-4b98-b37b-450ddb399152-kube-api-access-d7hpk\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.550167 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105c4d96-d73f-4b98-b37b-450ddb399152-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.550259 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105c4d96-d73f-4b98-b37b-450ddb399152-config-data\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.556080 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105c4d96-d73f-4b98-b37b-450ddb399152-config-data\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.556146 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105c4d96-d73f-4b98-b37b-450ddb399152-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.566279 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7hpk\" (UniqueName: \"kubernetes.io/projected/105c4d96-d73f-4b98-b37b-450ddb399152-kube-api-access-d7hpk\") pod \"nova-scheduler-0\" (UID: \"105c4d96-d73f-4b98-b37b-450ddb399152\") " pod="openstack/nova-scheduler-0" Nov 25 17:07:34 crc kubenswrapper[4812]: I1125 17:07:34.747313 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 17:07:35 crc kubenswrapper[4812]: I1125 17:07:35.232209 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 17:07:35 crc kubenswrapper[4812]: W1125 17:07:35.239969 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod105c4d96_d73f_4b98_b37b_450ddb399152.slice/crio-fa2ff0bbe83065d189b58b60621ecb3e193aaacc63e7dbd5aff724f2c049a5bb WatchSource:0}: Error finding container fa2ff0bbe83065d189b58b60621ecb3e193aaacc63e7dbd5aff724f2c049a5bb: Status 404 returned error can't find the container with id fa2ff0bbe83065d189b58b60621ecb3e193aaacc63e7dbd5aff724f2c049a5bb Nov 25 17:07:35 crc kubenswrapper[4812]: I1125 17:07:35.391913 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"105c4d96-d73f-4b98-b37b-450ddb399152","Type":"ContainerStarted","Data":"fa2ff0bbe83065d189b58b60621ecb3e193aaacc63e7dbd5aff724f2c049a5bb"} Nov 25 17:07:35 crc kubenswrapper[4812]: I1125 17:07:35.395598 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9626e4b9-4c10-4d8f-962f-03dbe9bfea89","Type":"ContainerStarted","Data":"5ecc259f47ae7647773cecfef3e327d6198739689e9e72b4e58fef463ae46202"} Nov 25 17:07:35 crc kubenswrapper[4812]: I1125 17:07:35.843953 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8528f1d6-993c-4104-aa01-a67ed54fc82b" path="/var/lib/kubelet/pods/8528f1d6-993c-4104-aa01-a67ed54fc82b/volumes" Nov 25 17:07:35 crc kubenswrapper[4812]: I1125 17:07:35.859014 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.858996019 podStartE2EDuration="2.858996019s" podCreationTimestamp="2025-11-25 17:07:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:35.417592812 +0000 UTC m=+1230.257734907" watchObservedRunningTime="2025-11-25 17:07:35.858996019 +0000 UTC m=+1230.699138114" Nov 25 17:07:36 crc kubenswrapper[4812]: I1125 17:07:36.410189 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"105c4d96-d73f-4b98-b37b-450ddb399152","Type":"ContainerStarted","Data":"ee1d99c7a8a82351382d9bd4a54c8200c545699530b172fe22d2ea8624934d45"} Nov 25 17:07:36 crc kubenswrapper[4812]: I1125 17:07:36.429002 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.4289824749999998 podStartE2EDuration="2.428982475s" podCreationTimestamp="2025-11-25 17:07:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:07:36.42441342 +0000 UTC m=+1231.264555515" watchObservedRunningTime="2025-11-25 17:07:36.428982475 +0000 UTC m=+1231.269124570" Nov 25 17:07:38 crc kubenswrapper[4812]: I1125 17:07:38.725451 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 17:07:38 crc kubenswrapper[4812]: I1125 17:07:38.726069 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 17:07:39 crc kubenswrapper[4812]: I1125 17:07:39.749197 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 17:07:40 crc 
kubenswrapper[4812]: I1125 17:07:40.715414 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 17:07:40 crc kubenswrapper[4812]: I1125 17:07:40.715486 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 17:07:41 crc kubenswrapper[4812]: I1125 17:07:41.729797 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9ee574ba-7f62-44c5-a6a7-c1a62704638c" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.188:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 17:07:41 crc kubenswrapper[4812]: I1125 17:07:41.729797 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="9ee574ba-7f62-44c5-a6a7-c1a62704638c" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.188:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 17:07:43 crc kubenswrapper[4812]: I1125 17:07:43.726023 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 17:07:43 crc kubenswrapper[4812]: I1125 17:07:43.726090 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 17:07:44 crc kubenswrapper[4812]: I1125 17:07:44.741662 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9626e4b9-4c10-4d8f-962f-03dbe9bfea89" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 17:07:44 crc kubenswrapper[4812]: I1125 17:07:44.742217 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9626e4b9-4c10-4d8f-962f-03dbe9bfea89" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 17:07:44 crc kubenswrapper[4812]: I1125 17:07:44.748381 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 17:07:44 crc kubenswrapper[4812]: I1125 17:07:44.773108 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 17:07:45 crc kubenswrapper[4812]: I1125 17:07:45.521651 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 17:07:46 crc kubenswrapper[4812]: I1125 17:07:46.510614 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 17:07:50 crc kubenswrapper[4812]: I1125 17:07:50.720731 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 17:07:50 crc kubenswrapper[4812]: I1125 17:07:50.721940 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 17:07:50 crc kubenswrapper[4812]: I1125 17:07:50.725632 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 17:07:50 crc kubenswrapper[4812]: I1125 17:07:50.727924 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 17:07:51 crc kubenswrapper[4812]: I1125 17:07:51.539827 4812 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 17:07:51 crc kubenswrapper[4812]: I1125 17:07:51.548991 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 17:07:53 crc kubenswrapper[4812]: I1125 17:07:53.730441 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 17:07:53 crc kubenswrapper[4812]: I1125 17:07:53.730903 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 17:07:53 crc kubenswrapper[4812]: I1125 17:07:53.735321 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 17:07:53 crc kubenswrapper[4812]: I1125 17:07:53.737557 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.332781 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.334044 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.334099 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.334790 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f4e03a1e42f2ab4e7283089fdd598dd4009c999c3046bed0520d29498108218e"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.334843 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://f4e03a1e42f2ab4e7283089fdd598dd4009c999c3046bed0520d29498108218e" gracePeriod=600 Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.601593 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="f4e03a1e42f2ab4e7283089fdd598dd4009c999c3046bed0520d29498108218e" exitCode=0 Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.601638 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"f4e03a1e42f2ab4e7283089fdd598dd4009c999c3046bed0520d29498108218e"} Nov 25 17:07:57 crc kubenswrapper[4812]: I1125 17:07:57.601679 4812 scope.go:117] "RemoveContainer" containerID="1342f495637eb94354f1b480bb23cc055dabea0e94c3ee8c3777be3bb44ef47e" Nov 25 17:07:58 crc kubenswrapper[4812]: I1125 17:07:58.612131 4812 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"928bfffc467fb2700fc3f642988d9c74f29633743edcd7e42d0737b45e725dce"} Nov 25 17:08:01 crc kubenswrapper[4812]: I1125 17:08:01.610733 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 17:08:02 crc kubenswrapper[4812]: I1125 17:08:02.596754 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 17:08:05 crc kubenswrapper[4812]: I1125 17:08:05.615620 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerName="rabbitmq" containerID="cri-o://2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c" gracePeriod=604796 Nov 25 17:08:06 crc kubenswrapper[4812]: I1125 17:08:06.181067 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="rabbitmq" containerID="cri-o://d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e" gracePeriod=604797 Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.157663 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.271389 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310372 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9h46q\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-kube-api-access-9h46q\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310463 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-erlang-cookie\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310547 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-tls\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310571 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/171759d9-0ee5-4a7c-9548-f41d11f0c112-pod-info\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310591 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/171759d9-0ee5-4a7c-9548-f41d11f0c112-erlang-cookie-secret\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") " 
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310640 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-server-conf\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310663 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-confd\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310703 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-config-data\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310768 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-plugins-conf\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310795 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310818 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-plugins\") pod \"171759d9-0ee5-4a7c-9548-f41d11f0c112\" (UID: \"171759d9-0ee5-4a7c-9548-f41d11f0c112\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.310936 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.311197 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.311245 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.311604 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.316460 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/171759d9-0ee5-4a7c-9548-f41d11f0c112-pod-info" (OuterVolumeSpecName: "pod-info") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.316598 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-kube-api-access-9h46q" (OuterVolumeSpecName: "kube-api-access-9h46q") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "kube-api-access-9h46q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.316809 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.317809 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.321795 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/171759d9-0ee5-4a7c-9548-f41d11f0c112-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.335484 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-config-data" (OuterVolumeSpecName: "config-data") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.355823 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-server-conf" (OuterVolumeSpecName: "server-conf") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412673 4812 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412728 4812 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412745 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412755 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9h46q\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-kube-api-access-9h46q\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412766 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412775 4812 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/171759d9-0ee5-4a7c-9548-f41d11f0c112-pod-info\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412782 4812 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/171759d9-0ee5-4a7c-9548-f41d11f0c112-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412790 4812 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-server-conf\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.412797 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/171759d9-0ee5-4a7c-9548-f41d11f0c112-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.423813 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "171759d9-0ee5-4a7c-9548-f41d11f0c112" (UID: "171759d9-0ee5-4a7c-9548-f41d11f0c112"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.432079 4812 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.514054 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/171759d9-0ee5-4a7c-9548-f41d11f0c112-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.514091 4812 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.673788 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.735821 4812 generic.go:334] "Generic (PLEG): container finished" podID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerID="2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c" exitCode=0
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.735884 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"171759d9-0ee5-4a7c-9548-f41d11f0c112","Type":"ContainerDied","Data":"2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c"}
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.735911 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"171759d9-0ee5-4a7c-9548-f41d11f0c112","Type":"ContainerDied","Data":"6e7cb869af185332b0da0f68f1ddd2afb73a2515d7599f41c070e51ddef00fcb"}
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.735928 4812 scope.go:117] "RemoveContainer" containerID="2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.736043 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.764842 4812 generic.go:334] "Generic (PLEG): container finished" podID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerID="d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e" exitCode=0
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.764896 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c7cd9664-97af-4900-a89e-ee5a790506c4","Type":"ContainerDied","Data":"d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e"}
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.764929 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c7cd9664-97af-4900-a89e-ee5a790506c4","Type":"ContainerDied","Data":"a2a522f0cef12664bedd566719bf84b119f7ce6178e9b8a10bc66be13cd846f6"}
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.765015 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.841627 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.858773 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.874166 4812 scope.go:117] "RemoveContainer" containerID="b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875476 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-server-conf\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875523 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l98hf\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-kube-api-access-l98hf\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875554 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-confd\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875595 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c7cd9664-97af-4900-a89e-ee5a790506c4-pod-info\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875614 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875658 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-plugins\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875689 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-tls\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875705 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-erlang-cookie\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875726 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c7cd9664-97af-4900-a89e-ee5a790506c4-erlang-cookie-secret\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875750 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-plugins-conf\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.875774 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-config-data\") pod \"c7cd9664-97af-4900-a89e-ee5a790506c4\" (UID: \"c7cd9664-97af-4900-a89e-ee5a790506c4\") "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.878430 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 17:08:12 crc kubenswrapper[4812]: E1125 17:08:12.878930 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="setup-container"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.878946 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="setup-container"
Nov 25 17:08:12 crc kubenswrapper[4812]: E1125 17:08:12.878969 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="rabbitmq"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.878975 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="rabbitmq"
Nov 25 17:08:12 crc kubenswrapper[4812]: E1125 17:08:12.878989 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerName="setup-container"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.878995 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerName="setup-container"
Nov 25 17:08:12 crc kubenswrapper[4812]: E1125 17:08:12.879002 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerName="rabbitmq"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.879008 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerName="rabbitmq"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.879155 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" containerName="rabbitmq"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.879175 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" containerName="rabbitmq"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.883404 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.884377 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.885122 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.890401 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f8qkh"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.890514 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.890635 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.891253 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.891347 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.891471 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.891485 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.894610 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.895766 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.895950 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7cd9664-97af-4900-a89e-ee5a790506c4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.897011 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-kube-api-access-l98hf" (OuterVolumeSpecName: "kube-api-access-l98hf") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "kube-api-access-l98hf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.897372 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.897788 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c7cd9664-97af-4900-a89e-ee5a790506c4-pod-info" (OuterVolumeSpecName: "pod-info") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.915619 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.932628 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-config-data" (OuterVolumeSpecName: "config-data") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.969061 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-server-conf" (OuterVolumeSpecName: "server-conf") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.978422 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l98hf\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-kube-api-access-l98hf\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.978510 4812 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c7cd9664-97af-4900-a89e-ee5a790506c4-pod-info\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979008 4812 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979035 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979047 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979061 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979073 4812 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c7cd9664-97af-4900-a89e-ee5a790506c4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979083 4812 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979093 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:12 crc kubenswrapper[4812]: I1125 17:08:12.979106 4812 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c7cd9664-97af-4900-a89e-ee5a790506c4-server-conf\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.002379 4812 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.018352 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c7cd9664-97af-4900-a89e-ee5a790506c4" (UID: "c7cd9664-97af-4900-a89e-ee5a790506c4"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.073585 4812 scope.go:117] "RemoveContainer" containerID="2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c"
Nov 25 17:08:13 crc kubenswrapper[4812]: E1125 17:08:13.074061 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c\": container with ID starting with 2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c not found: ID does not exist" containerID="2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.074102 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c"} err="failed to get container status \"2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c\": rpc error: code = NotFound desc = could not find container \"2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c\": container with ID starting with 2e6144df3f1e766ed6e01582fba38e9961007c093c2391c756576d7d9136298c not found: ID does not exist"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.074129 4812 scope.go:117] "RemoveContainer" containerID="b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d"
Nov 25 17:08:13 crc kubenswrapper[4812]: E1125 17:08:13.074627 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d\": container with ID starting with b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d not found: ID does not exist" containerID="b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.074669 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d"} err="failed to get container status \"b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d\": rpc error: code = NotFound desc = could not find container \"b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d\": container with ID starting with b0d7547217f860c1a52ffd3dbef85deebcdef33e067ac69682b0cda16ff1953d not found: ID does not exist"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.074697 4812 scope.go:117] "RemoveContainer" containerID="d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.080935 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.080978 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081000 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-config-data\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081014 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-pod-info\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081027 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081064 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081080 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081099 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbjzm\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-kube-api-access-gbjzm\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081152 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-server-conf\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081170 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081186 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081251 4812 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c7cd9664-97af-4900-a89e-ee5a790506c4-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.081262 4812 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.103204 4812 scope.go:117] "RemoveContainer" containerID="4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.106401 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.114402 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.142855 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.142945 4812 scope.go:117] "RemoveContainer" containerID="d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e"
Nov 25 17:08:13 crc kubenswrapper[4812]: E1125 17:08:13.145045 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e\": container with ID starting with d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e not found: ID does not exist" containerID="d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.145123 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e"} err="failed to get container status \"d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e\": rpc error: code = NotFound desc = could not find container \"d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e\": container with ID starting with d61f7652fde6c7dd2b063e7c024aca8f8e21e4a4bc236a3d5cc6f40bcc1a1d4e not found: ID does not exist"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.145167 4812 scope.go:117] "RemoveContainer" containerID="4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb"
Nov 25 17:08:13 crc kubenswrapper[4812]: E1125 17:08:13.145619 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb\": container with ID starting with 4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb not found: ID does not exist" containerID="4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.145652 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb"} err="failed to get container status \"4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb\": rpc error: code = NotFound desc = could not find container \"4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb\": container with ID starting with 4c5ee131aae86943ba22365f36d2fef1657bf9647c0c6550feecccf1f6e993cb not found: ID does not exist"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.147626 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.155929 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.155929 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.156085 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.156244 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-f7vvj"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.156296 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.156431 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.156559 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.158420 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.182834 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbjzm\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-kube-api-access-gbjzm\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.182930 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-server-conf\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.182956 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.182973 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183027 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183047 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183064 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-config-data\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183078 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-pod-info\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183095 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183133 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183148 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.183892 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.184403 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-server-conf\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.184522 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.184589 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.184439 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.186307 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-config-data\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.188278 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-pod-info\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.188339 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.190374 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.190482 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.199492 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbjzm\" (UniqueName: \"kubernetes.io/projected/19ea6ea2-55a4-42a9-b5c2-25cf5c486b10-kube-api-access-gbjzm\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.230990 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10\") " pod="openstack/rabbitmq-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284647 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284729 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\")
" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284834 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/08e85ac8-ef2c-4220-a08a-83a390cfce7e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284870 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284917 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n4dr\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-kube-api-access-5n4dr\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284945 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/08e85ac8-ef2c-4220-a08a-83a390cfce7e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.284970 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.285027 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.285052 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.285110 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.285147 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.380918 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386722 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/08e85ac8-ef2c-4220-a08a-83a390cfce7e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386779 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386825 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n4dr\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-kube-api-access-5n4dr\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386857 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/08e85ac8-ef2c-4220-a08a-83a390cfce7e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386877 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386908 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386929 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.386983 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.387017 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-server-conf\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.387082 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.387118 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.387922 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.388332 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.388389 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.388509 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.388745 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.389449 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/08e85ac8-ef2c-4220-a08a-83a390cfce7e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.391604 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 
17:08:13.391680 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/08e85ac8-ef2c-4220-a08a-83a390cfce7e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.391929 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/08e85ac8-ef2c-4220-a08a-83a390cfce7e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.392252 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.407286 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n4dr\" (UniqueName: \"kubernetes.io/projected/08e85ac8-ef2c-4220-a08a-83a390cfce7e-kube-api-access-5n4dr\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.417390 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"08e85ac8-ef2c-4220-a08a-83a390cfce7e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.471220 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.850655 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="171759d9-0ee5-4a7c-9548-f41d11f0c112" path="/var/lib/kubelet/pods/171759d9-0ee5-4a7c-9548-f41d11f0c112/volumes" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.851806 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7cd9664-97af-4900-a89e-ee5a790506c4" path="/var/lib/kubelet/pods/c7cd9664-97af-4900-a89e-ee5a790506c4/volumes" Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.852591 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 17:08:13 crc kubenswrapper[4812]: I1125 17:08:13.938471 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 17:08:13 crc kubenswrapper[4812]: W1125 17:08:13.938807 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08e85ac8_ef2c_4220_a08a_83a390cfce7e.slice/crio-a44eec8861d8fe7fe5ba53354fdc130f65b3e5eba2961b5d1a3139830fc88953 WatchSource:0}: Error finding container a44eec8861d8fe7fe5ba53354fdc130f65b3e5eba2961b5d1a3139830fc88953: Status 404 returned error can't find the container with id a44eec8861d8fe7fe5ba53354fdc130f65b3e5eba2961b5d1a3139830fc88953 Nov 25 17:08:14 crc kubenswrapper[4812]: I1125 17:08:14.786123 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10","Type":"ContainerStarted","Data":"7550c845a8a128cde5d8d9162023456792e78872f20696c5b83cde20868fabfd"} Nov 25 17:08:14 crc kubenswrapper[4812]: I1125 17:08:14.788275 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"08e85ac8-ef2c-4220-a08a-83a390cfce7e","Type":"ContainerStarted","Data":"a44eec8861d8fe7fe5ba53354fdc130f65b3e5eba2961b5d1a3139830fc88953"} Nov 25 17:08:15 crc kubenswrapper[4812]: I1125 17:08:15.799882 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10","Type":"ContainerStarted","Data":"84c6b43c201d52ba3ccda5dd8e3509a9c6ed9832b0934db81e601aca18dd2ec7"} Nov 25 17:08:15 crc kubenswrapper[4812]: I1125 17:08:15.802167 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"08e85ac8-ef2c-4220-a08a-83a390cfce7e","Type":"ContainerStarted","Data":"520196cc531ce1c22b120b1f38f35b83535f1f292c81cededee336e6101c7080"} Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.352753 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-q5d2f"] Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.354899 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.360687 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.370850 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-q5d2f"] Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.443438 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.443644 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-config\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.443773 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.443980 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4dqn\" (UniqueName: \"kubernetes.io/projected/bedcebdc-5887-451b-8ae8-f4b8d792e302-kube-api-access-h4dqn\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.444047 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.444081 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.545418 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4dqn\" (UniqueName: \"kubernetes.io/projected/bedcebdc-5887-451b-8ae8-f4b8d792e302-kube-api-access-h4dqn\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.545481 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: 
\"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.545505 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.545580 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.545623 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-config\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.545647 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.546767 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.546816 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.546840 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.546915 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-config\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.549857 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: 
I1125 17:08:16.564600 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4dqn\" (UniqueName: \"kubernetes.io/projected/bedcebdc-5887-451b-8ae8-f4b8d792e302-kube-api-access-h4dqn\") pod \"dnsmasq-dns-6447ccbd8f-q5d2f\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:16 crc kubenswrapper[4812]: I1125 17:08:16.675082 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:17 crc kubenswrapper[4812]: I1125 17:08:17.152601 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-q5d2f"] Nov 25 17:08:17 crc kubenswrapper[4812]: I1125 17:08:17.825252 4812 generic.go:334] "Generic (PLEG): container finished" podID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerID="4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3" exitCode=0 Nov 25 17:08:17 crc kubenswrapper[4812]: I1125 17:08:17.825301 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" event={"ID":"bedcebdc-5887-451b-8ae8-f4b8d792e302","Type":"ContainerDied","Data":"4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3"} Nov 25 17:08:17 crc kubenswrapper[4812]: I1125 17:08:17.825697 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" event={"ID":"bedcebdc-5887-451b-8ae8-f4b8d792e302","Type":"ContainerStarted","Data":"480851eb4149f75fe96c3cbb1f48a01adf92bf13651651bf1358813819527f9c"} Nov 25 17:08:18 crc kubenswrapper[4812]: I1125 17:08:18.835943 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" event={"ID":"bedcebdc-5887-451b-8ae8-f4b8d792e302","Type":"ContainerStarted","Data":"556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71"} Nov 25 17:08:18 crc kubenswrapper[4812]: I1125 17:08:18.836219 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:18 crc kubenswrapper[4812]: I1125 17:08:18.855068 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" podStartSLOduration=2.8550493599999998 podStartE2EDuration="2.85504936s" podCreationTimestamp="2025-11-25 17:08:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:08:18.850885458 +0000 UTC m=+1273.691027553" watchObservedRunningTime="2025-11-25 17:08:18.85504936 +0000 UTC m=+1273.695191455" Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.676477 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.722319 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-n2rck"] Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.722568 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerName="dnsmasq-dns" containerID="cri-o://f2ab7e5ed96da37f8e888ff2ad167ba4f5893a9c850c188eb7257b1f248b74c7" gracePeriod=10 Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.912337 4812 generic.go:334] "Generic (PLEG): container finished" podID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" 
containerID="f2ab7e5ed96da37f8e888ff2ad167ba4f5893a9c850c188eb7257b1f248b74c7" exitCode=0 Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.912380 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" event={"ID":"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf","Type":"ContainerDied","Data":"f2ab7e5ed96da37f8e888ff2ad167ba4f5893a9c850c188eb7257b1f248b74c7"} Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.945652 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fcqj5"] Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.947155 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:26 crc kubenswrapper[4812]: I1125 17:08:26.958634 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fcqj5"] Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.037919 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.037981 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.038020 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.038192 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g5kl\" (UniqueName: \"kubernetes.io/projected/ad724bc1-eeea-44a3-bd54-b2247235f111-kube-api-access-8g5kl\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.038263 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-config\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.038349 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.139604 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g5kl\" (UniqueName: 
\"kubernetes.io/projected/ad724bc1-eeea-44a3-bd54-b2247235f111-kube-api-access-8g5kl\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.139667 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-config\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.139716 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.139799 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.139847 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.139897 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.140776 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-config\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.140771 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.141748 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.141955 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: 
\"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.142004 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.160418 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g5kl\" (UniqueName: \"kubernetes.io/projected/ad724bc1-eeea-44a3-bd54-b2247235f111-kube-api-access-8g5kl\") pod \"dnsmasq-dns-864d5fc68c-fcqj5\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.268397 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.270745 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.443136 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gn7f\" (UniqueName: \"kubernetes.io/projected/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-kube-api-access-5gn7f\") pod \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.443580 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-sb\") pod \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.443611 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-nb\") pod \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.443729 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-dns-svc\") pod \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.443754 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-config\") pod \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\" (UID: \"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf\") " Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.447913 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-kube-api-access-5gn7f" (OuterVolumeSpecName: "kube-api-access-5gn7f") pod "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" (UID: "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf"). InnerVolumeSpecName "kube-api-access-5gn7f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.494142 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-config" (OuterVolumeSpecName: "config") pod "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" (UID: "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.494271 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" (UID: "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.495771 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" (UID: "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.497330 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" (UID: "8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.545850 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.545895 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.545910 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gn7f\" (UniqueName: \"kubernetes.io/projected/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-kube-api-access-5gn7f\") on node \"crc\" DevicePath \"\"" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.545926 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.545941 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.719218 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fcqj5"] Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.923710 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" 
event={"ID":"ad724bc1-eeea-44a3-bd54-b2247235f111","Type":"ContainerStarted","Data":"007eb560793e9595c75aed8a8619ce291bf0285007765afdde297432d17fad00"} Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.929283 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" event={"ID":"8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf","Type":"ContainerDied","Data":"de15b751f0298f5dc9a67a746b7ad5aa315ed7dbbbdfe4ea729cf69f12966d6d"} Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.929332 4812 scope.go:117] "RemoveContainer" containerID="f2ab7e5ed96da37f8e888ff2ad167ba4f5893a9c850c188eb7257b1f248b74c7" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.929574 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-n2rck" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.956652 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-n2rck"] Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.959897 4812 scope.go:117] "RemoveContainer" containerID="316f48bd193b69b47743ad7e4b733cfe55ef0acb930deb21b5121b16ff5f929e" Nov 25 17:08:27 crc kubenswrapper[4812]: I1125 17:08:27.963262 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-n2rck"] Nov 25 17:08:28 crc kubenswrapper[4812]: I1125 17:08:28.938785 4812 generic.go:334] "Generic (PLEG): container finished" podID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerID="cf49f7d4f2ddbc75beb73b290492202c2d7123de28e4170f4ccb26e03f1bbf94" exitCode=0 Nov 25 17:08:28 crc kubenswrapper[4812]: I1125 17:08:28.938836 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" event={"ID":"ad724bc1-eeea-44a3-bd54-b2247235f111","Type":"ContainerDied","Data":"cf49f7d4f2ddbc75beb73b290492202c2d7123de28e4170f4ccb26e03f1bbf94"} Nov 25 17:08:29 crc kubenswrapper[4812]: I1125 17:08:29.843648 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" path="/var/lib/kubelet/pods/8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf/volumes" Nov 25 17:08:29 crc kubenswrapper[4812]: I1125 17:08:29.956520 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" event={"ID":"ad724bc1-eeea-44a3-bd54-b2247235f111","Type":"ContainerStarted","Data":"8fed418639925d7655a7402531172fcd9c93199bbad5c44d7f59493b2890a4e0"} Nov 25 17:08:29 crc kubenswrapper[4812]: I1125 17:08:29.956823 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:29 crc kubenswrapper[4812]: I1125 17:08:29.991125 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" podStartSLOduration=3.991100158 podStartE2EDuration="3.991100158s" podCreationTimestamp="2025-11-25 17:08:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:08:29.977645255 +0000 UTC m=+1284.817787370" watchObservedRunningTime="2025-11-25 17:08:29.991100158 +0000 UTC m=+1284.831242253" Nov 25 17:08:37 crc kubenswrapper[4812]: I1125 17:08:37.270401 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:08:37 crc kubenswrapper[4812]: I1125 17:08:37.344155 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-6447ccbd8f-q5d2f"] Nov 25 17:08:37 crc kubenswrapper[4812]: I1125 17:08:37.344439 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerName="dnsmasq-dns" containerID="cri-o://556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71" gracePeriod=10 Nov 25 17:08:37 crc kubenswrapper[4812]: I1125 17:08:37.877212 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.043760 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-openstack-edpm-ipam\") pod \"bedcebdc-5887-451b-8ae8-f4b8d792e302\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.043877 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4dqn\" (UniqueName: \"kubernetes.io/projected/bedcebdc-5887-451b-8ae8-f4b8d792e302-kube-api-access-h4dqn\") pod \"bedcebdc-5887-451b-8ae8-f4b8d792e302\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.044148 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-nb\") pod \"bedcebdc-5887-451b-8ae8-f4b8d792e302\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.044194 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-dns-svc\") pod \"bedcebdc-5887-451b-8ae8-f4b8d792e302\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.044311 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-config\") pod \"bedcebdc-5887-451b-8ae8-f4b8d792e302\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.044403 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-sb\") pod \"bedcebdc-5887-451b-8ae8-f4b8d792e302\" (UID: \"bedcebdc-5887-451b-8ae8-f4b8d792e302\") " Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.047670 4812 generic.go:334] "Generic (PLEG): container finished" podID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerID="556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71" exitCode=0 Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.048307 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" event={"ID":"bedcebdc-5887-451b-8ae8-f4b8d792e302","Type":"ContainerDied","Data":"556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71"} Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.048367 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f" 
event={"ID":"bedcebdc-5887-451b-8ae8-f4b8d792e302","Type":"ContainerDied","Data":"480851eb4149f75fe96c3cbb1f48a01adf92bf13651651bf1358813819527f9c"}
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.048394 4812 scope.go:117] "RemoveContainer" containerID="556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.048729 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-q5d2f"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.056772 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bedcebdc-5887-451b-8ae8-f4b8d792e302-kube-api-access-h4dqn" (OuterVolumeSpecName: "kube-api-access-h4dqn") pod "bedcebdc-5887-451b-8ae8-f4b8d792e302" (UID: "bedcebdc-5887-451b-8ae8-f4b8d792e302"). InnerVolumeSpecName "kube-api-access-h4dqn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.097564 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bedcebdc-5887-451b-8ae8-f4b8d792e302" (UID: "bedcebdc-5887-451b-8ae8-f4b8d792e302"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.106625 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bedcebdc-5887-451b-8ae8-f4b8d792e302" (UID: "bedcebdc-5887-451b-8ae8-f4b8d792e302"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.107146 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bedcebdc-5887-451b-8ae8-f4b8d792e302" (UID: "bedcebdc-5887-451b-8ae8-f4b8d792e302"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.107983 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "bedcebdc-5887-451b-8ae8-f4b8d792e302" (UID: "bedcebdc-5887-451b-8ae8-f4b8d792e302"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.109234 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-config" (OuterVolumeSpecName: "config") pod "bedcebdc-5887-451b-8ae8-f4b8d792e302" (UID: "bedcebdc-5887-451b-8ae8-f4b8d792e302"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.147336 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-config\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.149021 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.149074 4812 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.149085 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4dqn\" (UniqueName: \"kubernetes.io/projected/bedcebdc-5887-451b-8ae8-f4b8d792e302-kube-api-access-h4dqn\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.149094 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.149103 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bedcebdc-5887-451b-8ae8-f4b8d792e302-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.168045 4812 scope.go:117] "RemoveContainer" containerID="4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.185184 4812 scope.go:117] "RemoveContainer" containerID="556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71"
Nov 25 17:08:38 crc kubenswrapper[4812]: E1125 17:08:38.185498 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71\": container with ID starting with 556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71 not found: ID does not exist" containerID="556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.185544 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71"} err="failed to get container status \"556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71\": rpc error: code = NotFound desc = could not find container \"556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71\": container with ID starting with 556c3218f44ecd241600044d8b457944fdef54e2c32564309543772533230e71 not found: ID does not exist"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.185563 4812 scope.go:117] "RemoveContainer" containerID="4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3"
Nov 25 17:08:38 crc kubenswrapper[4812]: E1125 17:08:38.185748 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3\": container with ID starting with 4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3 not found: ID does not exist" containerID="4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.185770 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3"} err="failed to get container status \"4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3\": rpc error: code = NotFound desc = could not find container \"4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3\": container with ID starting with 4d35ec5add08c83e85785b8d7f5f0dda6e88a476f5801086e10ca9c85f60d5e3 not found: ID does not exist"
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.379677 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-q5d2f"]
Nov 25 17:08:38 crc kubenswrapper[4812]: I1125 17:08:38.386681 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-q5d2f"]
Nov 25 17:08:39 crc kubenswrapper[4812]: I1125 17:08:39.842317 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" path="/var/lib/kubelet/pods/bedcebdc-5887-451b-8ae8-f4b8d792e302/volumes"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.732782 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"]
Nov 25 17:08:47 crc kubenswrapper[4812]: E1125 17:08:47.733840 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerName="dnsmasq-dns"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.733857 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerName="dnsmasq-dns"
Nov 25 17:08:47 crc kubenswrapper[4812]: E1125 17:08:47.733880 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerName="init"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.733888 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerName="init"
Nov 25 17:08:47 crc kubenswrapper[4812]: E1125 17:08:47.733926 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerName="dnsmasq-dns"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.733934 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerName="dnsmasq-dns"
Nov 25 17:08:47 crc kubenswrapper[4812]: E1125 17:08:47.733951 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerName="init"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.733959 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerName="init"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.734214 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8051fb60-7d6e-4a2a-ac2d-eb6974a55dcf" containerName="dnsmasq-dns"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.734238 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="bedcebdc-5887-451b-8ae8-f4b8d792e302" containerName="dnsmasq-dns"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.735054 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.737190 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.737526 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.738999 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.750973 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.755079 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"]
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.818865 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.818916 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.818956 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x2ps\" (UniqueName: \"kubernetes.io/projected/96745b0b-7343-4676-b692-46a52e6cfcb4-kube-api-access-8x2ps\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.819381 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.921896 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.922178 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.922217 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.922269 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x2ps\" (UniqueName: \"kubernetes.io/projected/96745b0b-7343-4676-b692-46a52e6cfcb4-kube-api-access-8x2ps\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.927154 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.927490 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.929664 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:47 crc kubenswrapper[4812]: I1125 17:08:47.937214 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x2ps\" (UniqueName: \"kubernetes.io/projected/96745b0b-7343-4676-b692-46a52e6cfcb4-kube-api-access-8x2ps\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.061487 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.140520 4812 generic.go:334] "Generic (PLEG): container finished" podID="19ea6ea2-55a4-42a9-b5c2-25cf5c486b10" containerID="84c6b43c201d52ba3ccda5dd8e3509a9c6ed9832b0934db81e601aca18dd2ec7" exitCode=0
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.140612 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10","Type":"ContainerDied","Data":"84c6b43c201d52ba3ccda5dd8e3509a9c6ed9832b0934db81e601aca18dd2ec7"}
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.147289 4812 generic.go:334] "Generic (PLEG): container finished" podID="08e85ac8-ef2c-4220-a08a-83a390cfce7e" containerID="520196cc531ce1c22b120b1f38f35b83535f1f292c81cededee336e6101c7080" exitCode=0
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.147325 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"08e85ac8-ef2c-4220-a08a-83a390cfce7e","Type":"ContainerDied","Data":"520196cc531ce1c22b120b1f38f35b83535f1f292c81cededee336e6101c7080"}
Nov 25 17:08:48 crc kubenswrapper[4812]: W1125 17:08:48.690706 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96745b0b_7343_4676_b692_46a52e6cfcb4.slice/crio-bccaecb34cb70401131a8e536e014bee4edf053b8492d3d3b96149e580942ac3 WatchSource:0}: Error finding container bccaecb34cb70401131a8e536e014bee4edf053b8492d3d3b96149e580942ac3: Status 404 returned error can't find the container with id bccaecb34cb70401131a8e536e014bee4edf053b8492d3d3b96149e580942ac3
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.692199 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"]
Nov 25 17:08:48 crc kubenswrapper[4812]: I1125 17:08:48.693496 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.156803 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"08e85ac8-ef2c-4220-a08a-83a390cfce7e","Type":"ContainerStarted","Data":"f288a921c1536eda75e5982ab25e0c93244281f0ec8e5cbdbac03aa4d913824c"}
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.157339 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.158885 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj" event={"ID":"96745b0b-7343-4676-b692-46a52e6cfcb4","Type":"ContainerStarted","Data":"bccaecb34cb70401131a8e536e014bee4edf053b8492d3d3b96149e580942ac3"}
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.160774 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"19ea6ea2-55a4-42a9-b5c2-25cf5c486b10","Type":"ContainerStarted","Data":"d531e79f87161d0689d68c27bdfb60fc4d353f15d5a4c0e0284f3931fbab2d17"}
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.161030 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.225485 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.225453743 podStartE2EDuration="36.225453743s" podCreationTimestamp="2025-11-25 17:08:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:08:49.199500895 +0000 UTC m=+1304.039643000" watchObservedRunningTime="2025-11-25 17:08:49.225453743 +0000 UTC m=+1304.065595838"
Nov 25 17:08:49 crc kubenswrapper[4812]: I1125 17:08:49.229635 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.229617406 podStartE2EDuration="37.229617406s" podCreationTimestamp="2025-11-25 17:08:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:08:49.224394575 +0000 UTC m=+1304.064536680" watchObservedRunningTime="2025-11-25 17:08:49.229617406 +0000 UTC m=+1304.069759501"
Nov 25 17:08:59 crc kubenswrapper[4812]: I1125 17:08:59.298242 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj" event={"ID":"96745b0b-7343-4676-b692-46a52e6cfcb4","Type":"ContainerStarted","Data":"79f2cf22d092d9b8fe94e1f1c6756b1553e623a8650ca5b4b6f22aef7f1539e6"}
Nov 25 17:08:59 crc kubenswrapper[4812]: I1125 17:08:59.328900 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj" podStartSLOduration=2.949035583 podStartE2EDuration="12.328879753s" podCreationTimestamp="2025-11-25 17:08:47 +0000 UTC" firstStartedPulling="2025-11-25 17:08:48.693209178 +0000 UTC m=+1303.533351273" lastFinishedPulling="2025-11-25 17:08:58.073053348 +0000 UTC m=+1312.913195443" observedRunningTime="2025-11-25 17:08:59.316669834 +0000 UTC m=+1314.156811969" watchObservedRunningTime="2025-11-25 17:08:59.328879753 +0000 UTC m=+1314.169021868"
Nov 25 17:09:03 crc kubenswrapper[4812]: I1125 17:09:03.385847 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 25 17:09:03 crc kubenswrapper[4812]: I1125 17:09:03.477694 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 17:09:11 crc kubenswrapper[4812]: I1125 17:09:11.413981 4812 generic.go:334] "Generic (PLEG): container finished" podID="96745b0b-7343-4676-b692-46a52e6cfcb4" containerID="79f2cf22d092d9b8fe94e1f1c6756b1553e623a8650ca5b4b6f22aef7f1539e6" exitCode=0
Nov 25 17:09:11 crc kubenswrapper[4812]: I1125 17:09:11.414074 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj" event={"ID":"96745b0b-7343-4676-b692-46a52e6cfcb4","Type":"ContainerDied","Data":"79f2cf22d092d9b8fe94e1f1c6756b1553e623a8650ca5b4b6f22aef7f1539e6"}
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.827119 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.886437 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-repo-setup-combined-ca-bundle\") pod \"96745b0b-7343-4676-b692-46a52e6cfcb4\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") "
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.886591 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8x2ps\" (UniqueName: \"kubernetes.io/projected/96745b0b-7343-4676-b692-46a52e6cfcb4-kube-api-access-8x2ps\") pod \"96745b0b-7343-4676-b692-46a52e6cfcb4\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") "
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.892459 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96745b0b-7343-4676-b692-46a52e6cfcb4-kube-api-access-8x2ps" (OuterVolumeSpecName: "kube-api-access-8x2ps") pod "96745b0b-7343-4676-b692-46a52e6cfcb4" (UID: "96745b0b-7343-4676-b692-46a52e6cfcb4"). InnerVolumeSpecName "kube-api-access-8x2ps". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.892640 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "96745b0b-7343-4676-b692-46a52e6cfcb4" (UID: "96745b0b-7343-4676-b692-46a52e6cfcb4"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.988341 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-ssh-key\") pod \"96745b0b-7343-4676-b692-46a52e6cfcb4\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") "
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.988676 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-inventory\") pod \"96745b0b-7343-4676-b692-46a52e6cfcb4\" (UID: \"96745b0b-7343-4676-b692-46a52e6cfcb4\") "
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.989635 4812 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:09:12 crc kubenswrapper[4812]: I1125 17:09:12.989659 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8x2ps\" (UniqueName: \"kubernetes.io/projected/96745b0b-7343-4676-b692-46a52e6cfcb4-kube-api-access-8x2ps\") on node \"crc\" DevicePath \"\""
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.013830 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-inventory" (OuterVolumeSpecName: "inventory") pod "96745b0b-7343-4676-b692-46a52e6cfcb4" (UID: "96745b0b-7343-4676-b692-46a52e6cfcb4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.018610 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "96745b0b-7343-4676-b692-46a52e6cfcb4" (UID: "96745b0b-7343-4676-b692-46a52e6cfcb4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.091358 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.091387 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96745b0b-7343-4676-b692-46a52e6cfcb4-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.436716 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj" event={"ID":"96745b0b-7343-4676-b692-46a52e6cfcb4","Type":"ContainerDied","Data":"bccaecb34cb70401131a8e536e014bee4edf053b8492d3d3b96149e580942ac3"}
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.436758 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.436766 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bccaecb34cb70401131a8e536e014bee4edf053b8492d3d3b96149e580942ac3"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.508786 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"]
Nov 25 17:09:13 crc kubenswrapper[4812]: E1125 17:09:13.509417 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96745b0b-7343-4676-b692-46a52e6cfcb4" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.513390 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="96745b0b-7343-4676-b692-46a52e6cfcb4" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.513718 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="96745b0b-7343-4676-b692-46a52e6cfcb4" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.514915 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.516888 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.518236 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.518405 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.518710 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.527467 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"]
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.703220 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.703324 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx75l\" (UniqueName: \"kubernetes.io/projected/dec7f299-d822-494a-9a86-351502541b77-kube-api-access-cx75l\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.703381 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.703472 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.805015 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.805089 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.805261 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.805400 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx75l\" (UniqueName: \"kubernetes.io/projected/dec7f299-d822-494a-9a86-351502541b77-kube-api-access-cx75l\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.809096 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.809316 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.820148 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.822751 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx75l\" (UniqueName: \"kubernetes.io/projected/dec7f299-d822-494a-9a86-351502541b77-kube-api-access-cx75l\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-855kz\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:13 crc kubenswrapper[4812]: I1125 17:09:13.840211 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"
Nov 25 17:09:14 crc kubenswrapper[4812]: I1125 17:09:14.326419 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"]
Nov 25 17:09:14 crc kubenswrapper[4812]: I1125 17:09:14.445370 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" event={"ID":"dec7f299-d822-494a-9a86-351502541b77","Type":"ContainerStarted","Data":"b29bae44bf5cafd7d2ab698802529aacabc2a94e48b44301eb397af0a69bfab5"}
Nov 25 17:09:15 crc kubenswrapper[4812]: I1125 17:09:15.454777 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" event={"ID":"dec7f299-d822-494a-9a86-351502541b77","Type":"ContainerStarted","Data":"a75b9c52212987054edd4d53fecd8822dd3185c37899ee6dfd76c3a40c8cd155"}
Nov 25 17:09:15 crc kubenswrapper[4812]: I1125 17:09:15.481157 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" podStartSLOduration=1.730465695 podStartE2EDuration="2.481139094s" podCreationTimestamp="2025-11-25 17:09:13 +0000 UTC" firstStartedPulling="2025-11-25 17:09:14.332200849 +0000 UTC m=+1329.172342944" lastFinishedPulling="2025-11-25 17:09:15.082874248 +0000 UTC m=+1329.923016343" observedRunningTime="2025-11-25 17:09:15.472247975 +0000 UTC m=+1330.312390090" watchObservedRunningTime="2025-11-25 17:09:15.481139094 +0000 UTC m=+1330.321281189"
Nov 25 17:09:52 crc kubenswrapper[4812]: I1125 17:09:52.137273 4812 scope.go:117] "RemoveContainer" containerID="76b92ddb195df309e4615329b555ae060e8431f41e64e19dc3d66afa9126f61d"
Nov 25 17:09:52 crc kubenswrapper[4812]: I1125 17:09:52.165089 4812 scope.go:117] "RemoveContainer" containerID="44a485c037869abdd67ffb3cc99739c0a39f7d76ed02783e064cb84eed2b4f89"
Nov 25 17:09:57 crc kubenswrapper[4812]: I1125 17:09:57.332085 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:09:57 crc kubenswrapper[4812]: I1125 17:09:57.332505 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.845000 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.846428 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.846496 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.849042 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.849259 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.993109 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:05 crc kubenswrapper[4812]: I1125 17:10:05.993485 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.095630 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.095693 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.095761 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.114380 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.176631 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.632697 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 17:10:06 crc kubenswrapper[4812]: I1125 17:10:06.903407 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6","Type":"ContainerStarted","Data":"7c646b9dd6ebf69446a8b3b1c664853996c006495b4ca4a24a7bc5e3b43c7181"}
Nov 25 17:10:07 crc kubenswrapper[4812]: I1125 17:10:07.912682 4812 generic.go:334] "Generic (PLEG): container finished" podID="ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6" containerID="e39410d6f506e08b67780a4d8752a4b79d51608db4fb4a0bf264801642f984a8" exitCode=0
Nov 25 17:10:07 crc kubenswrapper[4812]: I1125 17:10:07.912773 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6","Type":"ContainerDied","Data":"e39410d6f506e08b67780a4d8752a4b79d51608db4fb4a0bf264801642f984a8"}
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.260694 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.363163 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kubelet-dir\") pod \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") "
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.363258 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6" (UID: "ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.363385 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kube-api-access\") pod \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\" (UID: \"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6\") "
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.363936 4812 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.372356 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6" (UID: "ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.465646 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.932586 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6","Type":"ContainerDied","Data":"7c646b9dd6ebf69446a8b3b1c664853996c006495b4ca4a24a7bc5e3b43c7181"}
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.932667 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c646b9dd6ebf69446a8b3b1c664853996c006495b4ca4a24a7bc5e3b43c7181"
Nov 25 17:10:09 crc kubenswrapper[4812]: I1125 17:10:09.932638 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.821398 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 17:10:10 crc kubenswrapper[4812]: E1125 17:10:10.822012 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6" containerName="pruner"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.822025 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6" containerName="pruner"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.822254 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6f8bd9-bebb-49ba-9ccf-8e71c16819b6" containerName="pruner"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.822846 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.827509 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.828271 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.830642 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.994764 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ce85523-4c30-4a7b-b909-3e8c257895ce-kube-api-access\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.994809 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:10 crc kubenswrapper[4812]: I1125 17:10:10.995119 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-var-lock\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.097368 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-var-lock\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.097566 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-var-lock\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.097607 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ce85523-4c30-4a7b-b909-3e8c257895ce-kube-api-access\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.097658 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.097812 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-kubelet-dir\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.113364 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ce85523-4c30-4a7b-b909-3e8c257895ce-kube-api-access\") pod \"installer-9-crc\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.183830 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.614343 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 17:10:11 crc kubenswrapper[4812]: I1125 17:10:11.949684 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9ce85523-4c30-4a7b-b909-3e8c257895ce","Type":"ContainerStarted","Data":"c3ed29fb0d42f950075a894dd279e6ee6e587c78b0f4baa02a825f0eb32198d9"}
Nov 25 17:10:12 crc kubenswrapper[4812]: I1125 17:10:12.961547 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9ce85523-4c30-4a7b-b909-3e8c257895ce","Type":"ContainerStarted","Data":"18f94a66a35c27c81b4359ccb9db8691d78687b9548fea41ab79a520c0d344b8"}
Nov 25 17:10:12 crc kubenswrapper[4812]: I1125 17:10:12.988387 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.988364377 podStartE2EDuration="2.988364377s" podCreationTimestamp="2025-11-25 17:10:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:10:12.978468311 +0000 UTC m=+1387.818610406" watchObservedRunningTime="2025-11-25 17:10:12.988364377 +0000 UTC m=+1387.828506472"
Nov 25 17:10:27 crc kubenswrapper[4812]: I1125 17:10:27.332335 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:10:27 crc kubenswrapper[4812]: I1125 17:10:27.333794 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.691467 4812 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.694909 4812 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695075 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695088 4812 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695287 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483" gracePeriod=15
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695333 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084" gracePeriod=15
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695347 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe" gracePeriod=15
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695315 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8" gracePeriod=15
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695227 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747" gracePeriod=15
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.695824 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695853 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.695873 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695884 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.695899 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695907 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.695915 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695923 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.695940 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695948 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.695965 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.695972 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696182 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696197 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696207 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696280 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696291 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 17:10:49 crc kubenswrapper[4812]: E1125 17:10:49.696478 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696488 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.696703 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.701031 4812 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.719194 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.719255 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.719374 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.719450 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.719596 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.720201 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.720293 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.720363 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831503 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831576 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831600 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831670 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831696 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831719 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831746 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831791 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.831928 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832152 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832158 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832234 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832246 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832291 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832337 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:10:49 crc kubenswrapper[4812]: I1125 17:10:49.832330 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.324816 4812 generic.go:334] "Generic (PLEG): container finished" podID="9ce85523-4c30-4a7b-b909-3e8c257895ce" containerID="18f94a66a35c27c81b4359ccb9db8691d78687b9548fea41ab79a520c0d344b8" exitCode=0
Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.324944 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9ce85523-4c30-4a7b-b909-3e8c257895ce","Type":"ContainerDied","Data":"18f94a66a35c27c81b4359ccb9db8691d78687b9548fea41ab79a520c0d344b8"}
Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.325846 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused"
Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.328373 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.329835 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.331621 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8" exitCode=0
Nov 25 17:10:50 crc kubenswrapper[4812]:
I1125 17:10:50.331852 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483" exitCode=0 Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.331872 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe" exitCode=0 Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.331883 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084" exitCode=2 Nov 25 17:10:50 crc kubenswrapper[4812]: I1125 17:10:50.331963 4812 scope.go:117] "RemoveContainer" containerID="3da0685341163e486b0fa97726e98d45d9b8800f9cc8dba4efaa30b6fa86f407" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.344790 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.652122 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.652832 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.763964 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-var-lock\") pod \"9ce85523-4c30-4a7b-b909-3e8c257895ce\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.764844 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-var-lock" (OuterVolumeSpecName: "var-lock") pod "9ce85523-4c30-4a7b-b909-3e8c257895ce" (UID: "9ce85523-4c30-4a7b-b909-3e8c257895ce"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.765047 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ce85523-4c30-4a7b-b909-3e8c257895ce-kube-api-access\") pod \"9ce85523-4c30-4a7b-b909-3e8c257895ce\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.765218 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-kubelet-dir\") pod \"9ce85523-4c30-4a7b-b909-3e8c257895ce\" (UID: \"9ce85523-4c30-4a7b-b909-3e8c257895ce\") " Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.765927 4812 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.765928 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9ce85523-4c30-4a7b-b909-3e8c257895ce" (UID: "9ce85523-4c30-4a7b-b909-3e8c257895ce"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.771931 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ce85523-4c30-4a7b-b909-3e8c257895ce-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9ce85523-4c30-4a7b-b909-3e8c257895ce" (UID: "9ce85523-4c30-4a7b-b909-3e8c257895ce"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.867706 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ce85523-4c30-4a7b-b909-3e8c257895ce-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:51 crc kubenswrapper[4812]: I1125 17:10:51.868057 4812 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ce85523-4c30-4a7b-b909-3e8c257895ce-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.257921 4812 scope.go:117] "RemoveContainer" containerID="8e321d4100a069bc630e5af23f86169ad9ffa38828583660ef122bd32d47e596" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.295025 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.295818 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.296625 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.297221 4812 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.353863 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.353861 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"9ce85523-4c30-4a7b-b909-3e8c257895ce","Type":"ContainerDied","Data":"c3ed29fb0d42f950075a894dd279e6ee6e587c78b0f4baa02a825f0eb32198d9"} Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.354659 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3ed29fb0d42f950075a894dd279e6ee6e587c78b0f4baa02a825f0eb32198d9" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.357759 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.357993 4812 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.359361 4812 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747" exitCode=0 Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.359418 4812 scope.go:117] "RemoveContainer" containerID="d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.359440 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.360285 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.360522 4812 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.374569 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.374703 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.375678 4812 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.378605 4812 scope.go:117] "RemoveContainer" containerID="baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.388170 4812 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.388519 4812 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.389001 4812 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.389462 4812 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.389925 4812 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 
17:10:52.389979 4812 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.390302 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="200ms" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.397196 4812 scope.go:117] "RemoveContainer" containerID="2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.413004 4812 scope.go:117] "RemoveContainer" containerID="52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.429349 4812 scope.go:117] "RemoveContainer" containerID="d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.449994 4812 scope.go:117] "RemoveContainer" containerID="998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.471222 4812 scope.go:117] "RemoveContainer" containerID="d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.472069 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\": container with ID starting with d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8 not found: ID does not exist" containerID="d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.472099 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8"} err="failed to get container status \"d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\": rpc error: code = NotFound desc = could not find container \"d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8\": container with ID starting with d96a1ab7f21467817a4bd1db43b1fa0c91b92d0013572333fbdc78466264dbb8 not found: ID does not exist" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.472116 4812 scope.go:117] "RemoveContainer" containerID="baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.472473 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\": container with ID starting with baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483 not found: ID does not exist" containerID="baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.472496 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483"} err="failed to get container status \"baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\": rpc error: code = NotFound desc = could not find container \"baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483\": container 
with ID starting with baa0c45e3fb06b7553fe74753aca177c6f7a8f84c5ba880134f6503e73ea8483 not found: ID does not exist" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.472508 4812 scope.go:117] "RemoveContainer" containerID="2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.472717 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\": container with ID starting with 2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe not found: ID does not exist" containerID="2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.472735 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe"} err="failed to get container status \"2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\": rpc error: code = NotFound desc = could not find container \"2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe\": container with ID starting with 2a6fbbed9c2e9840fbb82a9f73110db85c3db74ad734ee4fb8733e1f02434efe not found: ID does not exist" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.472749 4812 scope.go:117] "RemoveContainer" containerID="52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.472969 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\": container with ID starting with 52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084 not found: ID does not exist" containerID="52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.473016 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084"} err="failed to get container status \"52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\": rpc error: code = NotFound desc = could not find container \"52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084\": container with ID starting with 52b0fa30110efd344799c6fa6068c56841d0319c28a20886795fbf8529d6e084 not found: ID does not exist" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.473040 4812 scope.go:117] "RemoveContainer" containerID="d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.473270 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\": container with ID starting with d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747 not found: ID does not exist" containerID="d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.473294 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747"} err="failed to get container status 
\"d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\": rpc error: code = NotFound desc = could not find container \"d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747\": container with ID starting with d62121cdd9f24317628e4c040834bf7603bcf1e0c947e8af75d3cef34aee9747 not found: ID does not exist" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.473307 4812 scope.go:117] "RemoveContainer" containerID="998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.473633 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\": container with ID starting with 998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d not found: ID does not exist" containerID="998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.473650 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d"} err="failed to get container status \"998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\": rpc error: code = NotFound desc = could not find container \"998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d\": container with ID starting with 998004dab1ad7b341d8b16d29bde4c346c5fe05c8ffb436eece4abf11367e92d not found: ID does not exist" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.475909 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.475951 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.475996 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.476077 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.476569 4812 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.476587 4812 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 17:10:52 crc kubenswrapper[4812]: E1125 17:10:52.591576 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="400ms" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.675751 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:52 crc kubenswrapper[4812]: I1125 17:10:52.675915 4812 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:53 crc kubenswrapper[4812]: E1125 17:10:53.001974 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="800ms" Nov 25 17:10:53 crc kubenswrapper[4812]: E1125 17:10:53.803281 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="1.6s" Nov 25 17:10:53 crc kubenswrapper[4812]: I1125 17:10:53.842632 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 17:10:54 crc kubenswrapper[4812]: I1125 17:10:54.427091 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="91bfadbe-a98d-49e4-88a9-97be162972a5" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 17:10:54 crc kubenswrapper[4812]: E1125 17:10:54.427761 4812 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/events\": dial tcp 38.102.83.188:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-state-metrics-0.187b4f1a88bac49d openstack 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openstack,Name:kube-state-metrics-0,UID:91bfadbe-a98d-49e4-88a9-97be162972a5,APIVersion:v1,ResourceVersion:42168,FieldPath:spec.containers{kube-state-metrics},},Reason:Unhealthy,Message:Liveness probe failed: HTTP probe 
failed with statuscode: 503,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 17:10:54.427366557 +0000 UTC m=+1429.267508652,LastTimestamp:2025-11-25 17:10:54.427366557 +0000 UTC m=+1429.267508652,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 17:10:54 crc kubenswrapper[4812]: E1125 17:10:54.743831 4812 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.188:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 17:10:54 crc kubenswrapper[4812]: I1125 17:10:54.745231 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 17:10:55 crc kubenswrapper[4812]: I1125 17:10:55.392080 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"8ad5d89dd345e7340100873cdfdba709698c08cccb9980b09e786f24386e5949"} Nov 25 17:10:55 crc kubenswrapper[4812]: I1125 17:10:55.392617 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"528fcfdc592abd14a869d24dbc54d717a558314f2d4409c186e54ac3484bb644"} Nov 25 17:10:55 crc kubenswrapper[4812]: E1125 17:10:55.393323 4812 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.188:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 17:10:55 crc kubenswrapper[4812]: I1125 17:10:55.393477 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:55 crc kubenswrapper[4812]: E1125 17:10:55.404655 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="3.2s" Nov 25 17:10:55 crc kubenswrapper[4812]: I1125 17:10:55.836733 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:57 crc kubenswrapper[4812]: I1125 17:10:57.333023 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:10:57 crc kubenswrapper[4812]: I1125 17:10:57.334161 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" 
podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:10:57 crc kubenswrapper[4812]: I1125 17:10:57.334227 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:10:57 crc kubenswrapper[4812]: I1125 17:10:57.414599 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"928bfffc467fb2700fc3f642988d9c74f29633743edcd7e42d0737b45e725dce"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:10:57 crc kubenswrapper[4812]: I1125 17:10:57.414731 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://928bfffc467fb2700fc3f642988d9c74f29633743edcd7e42d0737b45e725dce" gracePeriod=600 Nov 25 17:10:58 crc kubenswrapper[4812]: I1125 17:10:58.424851 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="928bfffc467fb2700fc3f642988d9c74f29633743edcd7e42d0737b45e725dce" exitCode=0 Nov 25 17:10:58 crc kubenswrapper[4812]: I1125 17:10:58.424917 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"928bfffc467fb2700fc3f642988d9c74f29633743edcd7e42d0737b45e725dce"} Nov 25 17:10:58 crc kubenswrapper[4812]: I1125 17:10:58.425352 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5"} Nov 25 17:10:58 crc kubenswrapper[4812]: I1125 17:10:58.425412 4812 scope.go:117] "RemoveContainer" containerID="f4e03a1e42f2ab4e7283089fdd598dd4009c999c3046bed0520d29498108218e" Nov 25 17:10:58 crc kubenswrapper[4812]: I1125 17:10:58.426667 4812 status_manager.go:851] "Failed to get status for pod" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-lcgpx\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:58 crc kubenswrapper[4812]: I1125 17:10:58.427078 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:10:58 crc kubenswrapper[4812]: E1125 17:10:58.606217 4812 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.188:6443: connect: connection refused" interval="6.4s" Nov 25 17:11:00 crc kubenswrapper[4812]: I1125 17:11:00.447237 
4812 generic.go:334] "Generic (PLEG): container finished" podID="06fdd0d8-45b7-4787-9f77-24f76fccc672" containerID="1da88cc096d8df568c029f8c874429f0a4d07cfab2c6ecf55272cda60aaafc0e" exitCode=1 Nov 25 17:11:00 crc kubenswrapper[4812]: I1125 17:11:00.447307 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" event={"ID":"06fdd0d8-45b7-4787-9f77-24f76fccc672","Type":"ContainerDied","Data":"1da88cc096d8df568c029f8c874429f0a4d07cfab2c6ecf55272cda60aaafc0e"} Nov 25 17:11:00 crc kubenswrapper[4812]: I1125 17:11:00.448980 4812 scope.go:117] "RemoveContainer" containerID="1da88cc096d8df568c029f8c874429f0a4d07cfab2c6ecf55272cda60aaafc0e" Nov 25 17:11:00 crc kubenswrapper[4812]: I1125 17:11:00.449992 4812 status_manager.go:851] "Failed to get status for pod" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f7b877f74-qcc8n\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:00 crc kubenswrapper[4812]: I1125 17:11:00.450921 4812 status_manager.go:851] "Failed to get status for pod" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-lcgpx\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:00 crc kubenswrapper[4812]: I1125 17:11:00.451249 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.458707 4812 generic.go:334] "Generic (PLEG): container finished" podID="06fdd0d8-45b7-4787-9f77-24f76fccc672" containerID="f793eb47f68a598ad5c2a833f25c61b0b0a48864de5c4e91b257317dbe7c3948" exitCode=1 Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.458791 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" event={"ID":"06fdd0d8-45b7-4787-9f77-24f76fccc672","Type":"ContainerDied","Data":"f793eb47f68a598ad5c2a833f25c61b0b0a48864de5c4e91b257317dbe7c3948"} Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.459080 4812 scope.go:117] "RemoveContainer" containerID="1da88cc096d8df568c029f8c874429f0a4d07cfab2c6ecf55272cda60aaafc0e" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.459848 4812 scope.go:117] "RemoveContainer" containerID="f793eb47f68a598ad5c2a833f25c61b0b0a48864de5c4e91b257317dbe7c3948" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.460019 4812 status_manager.go:851] "Failed to get status for pod" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-lcgpx\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: E1125 17:11:01.460136 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s 
restarting failed container=manager pod=metallb-operator-controller-manager-6f7b877f74-qcc8n_metallb-system(06fdd0d8-45b7-4787-9f77-24f76fccc672)\"" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.460375 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.460691 4812 status_manager.go:851] "Failed to get status for pod" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f7b877f74-qcc8n\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.871313 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.872491 4812 status_manager.go:851] "Failed to get status for pod" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f7b877f74-qcc8n\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.873763 4812 status_manager.go:851] "Failed to get status for pod" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-lcgpx\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.874238 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.887087 4812 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.887132 4812 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76" Nov 25 17:11:01 crc kubenswrapper[4812]: E1125 17:11:01.887834 4812 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:01 crc kubenswrapper[4812]: I1125 17:11:01.888698 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:01 crc kubenswrapper[4812]: W1125 17:11:01.917068 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-cbb561279d503f3d6e997f48fbf7bc5e0cd1ce94ec0131a8988ab71907e11c9b WatchSource:0}: Error finding container cbb561279d503f3d6e997f48fbf7bc5e0cd1ce94ec0131a8988ab71907e11c9b: Status 404 returned error can't find the container with id cbb561279d503f3d6e997f48fbf7bc5e0cd1ce94ec0131a8988ab71907e11c9b Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.468320 4812 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="4914b3404454c1e3120a8c7f8e23e45de3011b09c7784aa7efb2f856a25c532b" exitCode=0 Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.468381 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"4914b3404454c1e3120a8c7f8e23e45de3011b09c7784aa7efb2f856a25c532b"} Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.468726 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cbb561279d503f3d6e997f48fbf7bc5e0cd1ce94ec0131a8988ab71907e11c9b"} Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.469053 4812 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76" Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.469071 4812 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76" Nov 25 17:11:02 crc kubenswrapper[4812]: E1125 17:11:02.469489 4812 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.469839 4812 status_manager.go:851] "Failed to get status for pod" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-6f7b877f74-qcc8n\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.470069 4812 status_manager.go:851] "Failed to get status for pod" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-lcgpx\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:02 crc kubenswrapper[4812]: I1125 17:11:02.470301 4812 status_manager.go:851] "Failed to get status for pod" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.188:6443: connect: connection refused" Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 
17:11:03.509069 4812 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.509389 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.525571 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b4f09d709855e132350f5367e89d99e8a51ebde8033e362a8558250bc2a93f5f"} Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.525625 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d98e7c181a9af4edc3d7da408a4cdfc65606de4e517f402988a7294456f0b67c"} Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.525639 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2ab01ba7f5df40887bef85388f248e3deaba85ce90abad52b607fc7d6664e7ad"} Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.533707 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.533764 4812 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21" exitCode=1 Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.533798 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21"} Nov 25 17:11:03 crc kubenswrapper[4812]: I1125 17:11:03.534451 4812 scope.go:117] "RemoveContainer" containerID="794035d1a2dbe8b5a1d28a4174ca6729fed6f236749008a13826d8541edd5a21" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.439822 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="91bfadbe-a98d-49e4-88a9-97be162972a5" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.463870 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.464480 4812 scope.go:117] "RemoveContainer" containerID="f793eb47f68a598ad5c2a833f25c61b0b0a48864de5c4e91b257317dbe7c3948" Nov 25 17:11:04 crc kubenswrapper[4812]: E1125 17:11:04.464878 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" 
with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-6f7b877f74-qcc8n_metallb-system(06fdd0d8-45b7-4787-9f77-24f76fccc672)\"" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.544801 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"dc5a4fc4892c6432dd5923de36cd0a5b01da6c536791145722fc5d5fa718482a"} Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.544849 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"6d856dc2730972cc92bd7f90c04eb9033405cb43ab1b51c73710d262f62517a5"} Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.544952 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.545052 4812 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.545070 4812 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.547400 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 17:11:04 crc kubenswrapper[4812]: I1125 17:11:04.547461 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"890487d7435edd9d2ab65a9343e7bc58c435f441157d4df66603765545eabf74"} Nov 25 17:11:06 crc kubenswrapper[4812]: I1125 17:11:06.389811 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 17:11:06 crc kubenswrapper[4812]: I1125 17:11:06.889873 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:06 crc kubenswrapper[4812]: I1125 17:11:06.890256 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:06 crc kubenswrapper[4812]: I1125 17:11:06.895127 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:08 crc kubenswrapper[4812]: I1125 17:11:08.982270 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 17:11:08 crc kubenswrapper[4812]: I1125 17:11:08.988456 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 17:11:09 crc kubenswrapper[4812]: I1125 17:11:09.554673 4812 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 17:11:09 crc kubenswrapper[4812]: I1125 17:11:09.600369 4812 
Nov 25 17:11:09 crc kubenswrapper[4812]: I1125 17:11:09.600369 4812 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76"
Nov 25 17:11:09 crc kubenswrapper[4812]: I1125 17:11:09.600658 4812 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76"
Nov 25 17:11:09 crc kubenswrapper[4812]: I1125 17:11:09.608696 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:11:09 crc kubenswrapper[4812]: I1125 17:11:09.611339 4812 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="18c7bb9c-67c0-4564-a4a1-b49a0c047768"
Nov 25 17:11:10 crc kubenswrapper[4812]: I1125 17:11:10.607599 4812 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76"
Nov 25 17:11:10 crc kubenswrapper[4812]: I1125 17:11:10.608835 4812 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bacb2994-771c-4add-8b78-afff14608f76"
Nov 25 17:11:11 crc kubenswrapper[4812]: I1125 17:11:11.616542 4812 generic.go:334] "Generic (PLEG): container finished" podID="bac38f31-ec39-46b9-9bac-2920864fb8a2" containerID="3114fd0fb85bc1c0d9890f4b33a6c01898d8ae805772bb83434dabd836391fa9" exitCode=1
Nov 25 17:11:11 crc kubenswrapper[4812]: I1125 17:11:11.616565 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerDied","Data":"3114fd0fb85bc1c0d9890f4b33a6c01898d8ae805772bb83434dabd836391fa9"}
Nov 25 17:11:11 crc kubenswrapper[4812]: I1125 17:11:11.617658 4812 scope.go:117] "RemoveContainer" containerID="3114fd0fb85bc1c0d9890f4b33a6c01898d8ae805772bb83434dabd836391fa9"
Nov 25 17:11:11 crc kubenswrapper[4812]: I1125 17:11:11.618685 4812 generic.go:334] "Generic (PLEG): container finished" podID="2023c319-572e-4d1d-bb2a-56e3842430db" containerID="ec8b03fb1be3d0d049a1d713c6a34ceb9bbb5c3e709af78690096b8950502244" exitCode=1
Nov 25 17:11:11 crc kubenswrapper[4812]: I1125 17:11:11.618703 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" event={"ID":"2023c319-572e-4d1d-bb2a-56e3842430db","Type":"ContainerDied","Data":"ec8b03fb1be3d0d049a1d713c6a34ceb9bbb5c3e709af78690096b8950502244"}
Nov 25 17:11:11 crc kubenswrapper[4812]: I1125 17:11:11.619304 4812 scope.go:117] "RemoveContainer" containerID="ec8b03fb1be3d0d049a1d713c6a34ceb9bbb5c3e709af78690096b8950502244"
Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.627695 4812 generic.go:334] "Generic (PLEG): container finished" podID="07907797-7edd-48e0-bb69-e42ad740f173" containerID="0f80e5511d8dc1a0e4560528ad2a6eade741bdab9fcd954359b8192391929042" exitCode=1
Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.627776 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerDied","Data":"0f80e5511d8dc1a0e4560528ad2a6eade741bdab9fcd954359b8192391929042"}
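
The "Trying to delete pod" / "Deleting a mirror pod" pairs above are static-pod bookkeeping: kube-apiserver-crc is defined by a manifest on the node, the kubelet publishes a read-only mirror pod for it to the API server, and once the manifest changes the old mirror (UID bacb2994-...) no longer matches the regenerated static pod (UID 71bb4a3a..., derived from its config hash), so the mirror is deleted and recreated. The "Pod was deleted and then recreated, skipping status update" line is the status manager noticing the UID swap. A hedged sketch of the staleness check; the types are illustrative rather than kubelet's, and the stale hash value is hypothetical:

    package main

    import "fmt"

    // The kubelet stamps each mirror pod with a kubernetes.io/config.mirror
    // annotation holding the static pod's config hash, and deletes/recreates
    // the mirror when the hashes diverge.
    type pod struct {
        uid         string
        annotations map[string]string
    }

    func mirrorOutdated(static, mirror pod) bool {
        return mirror.annotations["kubernetes.io/config.mirror"] !=
            static.annotations["kubernetes.io/config.hash"]
    }

    func main() {
        static := pod{
            uid:         "71bb4a3aecc4ba5b26c4b7318770ce13", // static pod UID == config hash
            annotations: map[string]string{"kubernetes.io/config.hash": "71bb4a3aecc4ba5b26c4b7318770ce13"},
        }
        mirror := pod{
            uid:         "bacb2994-771c-4add-8b78-afff14608f76", // API-server-assigned UID
            annotations: map[string]string{"kubernetes.io/config.mirror": "previous-config-hash"}, // hypothetical stale value
        }
        fmt.Println(mirrorOutdated(static, mirror)) // true -> "Deleting a mirror pod"
    }
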
containerID="0f80e5511d8dc1a0e4560528ad2a6eade741bdab9fcd954359b8192391929042" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.630657 4812 generic.go:334] "Generic (PLEG): container finished" podID="13981b57-58d8-42f8-a833-f9760f06df20" containerID="860702da5ff8378a85ab73909c5f0c8a56a1dea49f0bd2eb619657a06847753c" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.630722 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" event={"ID":"13981b57-58d8-42f8-a833-f9760f06df20","Type":"ContainerDied","Data":"860702da5ff8378a85ab73909c5f0c8a56a1dea49f0bd2eb619657a06847753c"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.631008 4812 scope.go:117] "RemoveContainer" containerID="860702da5ff8378a85ab73909c5f0c8a56a1dea49f0bd2eb619657a06847753c" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.634460 4812 generic.go:334] "Generic (PLEG): container finished" podID="10dee73c-a6d8-429d-b5c0-9226eec6d1f3" containerID="c497239c53b0bb0aea6346e5b2828b86590ebf4ea5affca580af0e62937a59b3" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.634550 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" event={"ID":"10dee73c-a6d8-429d-b5c0-9226eec6d1f3","Type":"ContainerDied","Data":"c497239c53b0bb0aea6346e5b2828b86590ebf4ea5affca580af0e62937a59b3"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.635009 4812 scope.go:117] "RemoveContainer" containerID="c497239c53b0bb0aea6346e5b2828b86590ebf4ea5affca580af0e62937a59b3" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.637892 4812 generic.go:334] "Generic (PLEG): container finished" podID="bac38f31-ec39-46b9-9bac-2920864fb8a2" containerID="f66145e7cdf5247b4b160714dafbbe9896c9c319b2c62d097bb4b8c2fcfcfa4c" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.637948 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerDied","Data":"f66145e7cdf5247b4b160714dafbbe9896c9c319b2c62d097bb4b8c2fcfcfa4c"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.637975 4812 scope.go:117] "RemoveContainer" containerID="3114fd0fb85bc1c0d9890f4b33a6c01898d8ae805772bb83434dabd836391fa9" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.638402 4812 scope.go:117] "RemoveContainer" containerID="f66145e7cdf5247b4b160714dafbbe9896c9c319b2c62d097bb4b8c2fcfcfa4c" Nov 25 17:11:12 crc kubenswrapper[4812]: E1125 17:11:12.638687 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-clwgz_openstack-operators(bac38f31-ec39-46b9-9bac-2920864fb8a2)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" podUID="bac38f31-ec39-46b9-9bac-2920864fb8a2" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.641920 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" event={"ID":"2023c319-572e-4d1d-bb2a-56e3842430db","Type":"ContainerStarted","Data":"1ddd7990d5123668d31c9fbeb220975fcd639b1c16391e4a5cddc22ebca43b78"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.642138 4812 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.643779 4812 generic.go:334] "Generic (PLEG): container finished" podID="7f72311f-8622-43f6-b499-8b52318b0e2a" containerID="a57d9f197214c8f522b46a33a7f8df91bce62137fe97771b61ffdd37fdc75b36" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.643851 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" event={"ID":"7f72311f-8622-43f6-b499-8b52318b0e2a","Type":"ContainerDied","Data":"a57d9f197214c8f522b46a33a7f8df91bce62137fe97771b61ffdd37fdc75b36"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.644630 4812 scope.go:117] "RemoveContainer" containerID="a57d9f197214c8f522b46a33a7f8df91bce62137fe97771b61ffdd37fdc75b36" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.646654 4812 generic.go:334] "Generic (PLEG): container finished" podID="98133284-26db-4073-a43c-f9572476153c" containerID="1589153ca57a370c41d950d1ef41a4a6af430c627f5364bdb0ba6a4ace1e65d2" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.646724 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerDied","Data":"1589153ca57a370c41d950d1ef41a4a6af430c627f5364bdb0ba6a4ace1e65d2"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.647216 4812 scope.go:117] "RemoveContainer" containerID="1589153ca57a370c41d950d1ef41a4a6af430c627f5364bdb0ba6a4ace1e65d2" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.648894 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.649402 4812 generic.go:334] "Generic (PLEG): container finished" podID="4c649e41-10e8-4eee-bfc0-bf1a9409e421" containerID="32cefc8394ca80e9b833f9439bdf862cf04c66049d85a0c77578bb82ad7e1848" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.649460 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" event={"ID":"4c649e41-10e8-4eee-bfc0-bf1a9409e421","Type":"ContainerDied","Data":"32cefc8394ca80e9b833f9439bdf862cf04c66049d85a0c77578bb82ad7e1848"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.649935 4812 scope.go:117] "RemoveContainer" containerID="32cefc8394ca80e9b833f9439bdf862cf04c66049d85a0c77578bb82ad7e1848" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.653995 4812 generic.go:334] "Generic (PLEG): container finished" podID="d6f00506-8ef7-46ec-9492-01e0005f90d3" containerID="a2441127312ad5277ba2486c2d0f087210176039960daaf41da269f95dbca100" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.654072 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" event={"ID":"d6f00506-8ef7-46ec-9492-01e0005f90d3","Type":"ContainerDied","Data":"a2441127312ad5277ba2486c2d0f087210176039960daaf41da269f95dbca100"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.654570 4812 scope.go:117] "RemoveContainer" containerID="a2441127312ad5277ba2486c2d0f087210176039960daaf41da269f95dbca100" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.656649 4812 generic.go:334] "Generic (PLEG): container 
finished" podID="39d8b8c1-7015-487a-9263-25531a65c48c" containerID="dc3206865b8b1fa593acbe457ead7ff70274aae9cbb488665ef47fce67e7ef95" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.656729 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" event={"ID":"39d8b8c1-7015-487a-9263-25531a65c48c","Type":"ContainerDied","Data":"dc3206865b8b1fa593acbe457ead7ff70274aae9cbb488665ef47fce67e7ef95"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.657822 4812 scope.go:117] "RemoveContainer" containerID="dc3206865b8b1fa593acbe457ead7ff70274aae9cbb488665ef47fce67e7ef95" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.660663 4812 generic.go:334] "Generic (PLEG): container finished" podID="18418f15-9ec8-48df-a761-118f45058d06" containerID="d00cf2235f4744c58203a99d5605089a7e152ebeaa45e0f543846cd12189ea5e" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.660735 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" event={"ID":"18418f15-9ec8-48df-a761-118f45058d06","Type":"ContainerDied","Data":"d00cf2235f4744c58203a99d5605089a7e152ebeaa45e0f543846cd12189ea5e"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.661392 4812 scope.go:117] "RemoveContainer" containerID="d00cf2235f4744c58203a99d5605089a7e152ebeaa45e0f543846cd12189ea5e" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.663755 4812 generic.go:334] "Generic (PLEG): container finished" podID="2374c36a-5118-4a90-985c-1f80597d73af" containerID="3969c6027b71597f9d30beaa578c36aebed487523664a83323de6a99f3da11b9" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.663815 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" event={"ID":"2374c36a-5118-4a90-985c-1f80597d73af","Type":"ContainerDied","Data":"3969c6027b71597f9d30beaa578c36aebed487523664a83323de6a99f3da11b9"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.664116 4812 scope.go:117] "RemoveContainer" containerID="3969c6027b71597f9d30beaa578c36aebed487523664a83323de6a99f3da11b9" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.666508 4812 generic.go:334] "Generic (PLEG): container finished" podID="36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b" containerID="04762816d14047ca3f7d0ca0fbd8f44c397c3905f2c884ae8fdc2d500a3f0bbc" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.666597 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" event={"ID":"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b","Type":"ContainerDied","Data":"04762816d14047ca3f7d0ca0fbd8f44c397c3905f2c884ae8fdc2d500a3f0bbc"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.667158 4812 scope.go:117] "RemoveContainer" containerID="04762816d14047ca3f7d0ca0fbd8f44c397c3905f2c884ae8fdc2d500a3f0bbc" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.669557 4812 generic.go:334] "Generic (PLEG): container finished" podID="16fc0b64-6599-4b8b-a0b7-b609dab9dd31" containerID="451f1386770552f65be8500268dacc98d675844b487465ac18bf1827f0b7b56a" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.669709 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" 
event={"ID":"16fc0b64-6599-4b8b-a0b7-b609dab9dd31","Type":"ContainerDied","Data":"451f1386770552f65be8500268dacc98d675844b487465ac18bf1827f0b7b56a"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.671268 4812 scope.go:117] "RemoveContainer" containerID="451f1386770552f65be8500268dacc98d675844b487465ac18bf1827f0b7b56a" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.681084 4812 generic.go:334] "Generic (PLEG): container finished" podID="54850143-f77e-4d59-bcc4-c5bd3bc85880" containerID="d3636554fd6483c45f349ea319aac606c837dc129a8c4b6677981fe3f3e094b4" exitCode=1 Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.681122 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" event={"ID":"54850143-f77e-4d59-bcc4-c5bd3bc85880","Type":"ContainerDied","Data":"d3636554fd6483c45f349ea319aac606c837dc129a8c4b6677981fe3f3e094b4"} Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.681817 4812 scope.go:117] "RemoveContainer" containerID="d3636554fd6483c45f349ea319aac606c837dc129a8c4b6677981fe3f3e094b4" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.695893 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.732875 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.766161 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.801729 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.879155 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.901581 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" podUID="48707b31-d8f9-4a7e-a8b9-2728249f0a49" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.81:8081/readyz\": dial tcp 10.217.0.81:8081: connect: connection refused" Nov 25 17:11:12 crc kubenswrapper[4812]: I1125 17:11:12.942068 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.053441 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.071389 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" podUID="ab19d0cd-1e29-41af-892c-8f25f12b7f1c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.85:8081/readyz\": dial tcp 10.217.0.85:8081: connect: connection refused" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.081815 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.241866 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.90:8081/readyz\": dial tcp 10.217.0.90:8081: connect: connection refused" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.312864 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.331278 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.523822 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.548186 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.691305 4812 generic.go:334] "Generic (PLEG): container finished" podID="98133284-26db-4073-a43c-f9572476153c" containerID="edcf94dff91b9e5dfdc5bece5f9e35765e41c9ed9efbfd4b60351cfd6b0e4444" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.691372 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerDied","Data":"edcf94dff91b9e5dfdc5bece5f9e35765e41c9ed9efbfd4b60351cfd6b0e4444"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.691407 4812 scope.go:117] "RemoveContainer" containerID="1589153ca57a370c41d950d1ef41a4a6af430c627f5364bdb0ba6a4ace1e65d2" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.691985 4812 scope.go:117] "RemoveContainer" containerID="edcf94dff91b9e5dfdc5bece5f9e35765e41c9ed9efbfd4b60351cfd6b0e4444" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.692399 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-vzgsw_openstack-operators(98133284-26db-4073-a43c-f9572476153c)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.695454 4812 generic.go:334] "Generic (PLEG): container finished" podID="16fc0b64-6599-4b8b-a0b7-b609dab9dd31" containerID="8eb28280138f0530faa5c201d867e1d77c3034ef7338e9d1c27321b4e1aacf4e" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.695570 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" event={"ID":"16fc0b64-6599-4b8b-a0b7-b609dab9dd31","Type":"ContainerDied","Data":"8eb28280138f0530faa5c201d867e1d77c3034ef7338e9d1c27321b4e1aacf4e"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.696037 4812 scope.go:117] "RemoveContainer" containerID="8eb28280138f0530faa5c201d867e1d77c3034ef7338e9d1c27321b4e1aacf4e" Nov 25 17:11:13 
crc kubenswrapper[4812]: E1125 17:11:13.696331 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-858778c9dc-ncdvr_openstack-operators(16fc0b64-6599-4b8b-a0b7-b609dab9dd31)\"" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" podUID="16fc0b64-6599-4b8b-a0b7-b609dab9dd31" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.698723 4812 generic.go:334] "Generic (PLEG): container finished" podID="18418f15-9ec8-48df-a761-118f45058d06" containerID="b8442e1a39175832ba3feb326b882fbe2bb756a68c59c4d63fbbd625a9a0c3b8" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.698822 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" event={"ID":"18418f15-9ec8-48df-a761-118f45058d06","Type":"ContainerDied","Data":"b8442e1a39175832ba3feb326b882fbe2bb756a68c59c4d63fbbd625a9a0c3b8"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.699076 4812 scope.go:117] "RemoveContainer" containerID="b8442e1a39175832ba3feb326b882fbe2bb756a68c59c4d63fbbd625a9a0c3b8" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.699303 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-qn86p_openstack-operators(18418f15-9ec8-48df-a761-118f45058d06)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" podUID="18418f15-9ec8-48df-a761-118f45058d06" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.708807 4812 generic.go:334] "Generic (PLEG): container finished" podID="36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b" containerID="82949c4052896799150c9ef1b6118840c51e178295d92a96631f45c8a2eccd94" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.708975 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" event={"ID":"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b","Type":"ContainerDied","Data":"82949c4052896799150c9ef1b6118840c51e178295d92a96631f45c8a2eccd94"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.709360 4812 scope.go:117] "RemoveContainer" containerID="82949c4052896799150c9ef1b6118840c51e178295d92a96631f45c8a2eccd94" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.709668 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-jnszn_openstack-operators(36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" podUID="36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.711710 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" event={"ID":"54850143-f77e-4d59-bcc4-c5bd3bc85880","Type":"ContainerStarted","Data":"4fbbacaa3dd8affccc2507370c3147b55504e697d273a2a625790ff826214cad"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.711959 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
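
The readiness failures above all point at :8081/readyz, the conventional health port of these operator controller managers; "connect: connection refused" means the manager process was not listening at all (it had just crashed), unlike kube-state-metrics earlier, which was up but answered 503. A sketch of the kind of endpoint these probes expect, using plain net/http rather than any particular framework; everything here is illustrative:

    package main

    import (
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // Health endpoints on :8081; until this process is running and
        // listening, probes fail with "connect: connection refused".
        http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "ok")
        })
        http.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
            // Writing a non-2xx/3xx status here would instead surface as
            // "HTTP probe failed with statuscode: ...".
            fmt.Fprintln(w, "ok")
        })
        log.Fatal(http.ListenAndServe(":8081", nil))
    }
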
pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.714803 4812 generic.go:334] "Generic (PLEG): container finished" podID="07907797-7edd-48e0-bb69-e42ad740f173" containerID="ff965109c7c78b9a06b587b6eb481aa6e13416cc40355af9d7bc1d217cf851c9" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.714857 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerDied","Data":"ff965109c7c78b9a06b587b6eb481aa6e13416cc40355af9d7bc1d217cf851c9"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.715206 4812 scope.go:117] "RemoveContainer" containerID="ff965109c7c78b9a06b587b6eb481aa6e13416cc40355af9d7bc1d217cf851c9" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.715436 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-h9l5r_openstack-operators(07907797-7edd-48e0-bb69-e42ad740f173)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" podUID="07907797-7edd-48e0-bb69-e42ad740f173" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.718430 4812 scope.go:117] "RemoveContainer" containerID="f66145e7cdf5247b4b160714dafbbe9896c9c319b2c62d097bb4b8c2fcfcfa4c" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.719017 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-clwgz_openstack-operators(bac38f31-ec39-46b9-9bac-2920864fb8a2)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" podUID="bac38f31-ec39-46b9-9bac-2920864fb8a2" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.722114 4812 generic.go:334] "Generic (PLEG): container finished" podID="f3785053-5fa1-43b6-86f7-0182a1a49946" containerID="fe08c810d96a8afe62aca260ff216aeb0bd8cbb3e9add8d23ff941fa3dc0512e" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.722195 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" event={"ID":"f3785053-5fa1-43b6-86f7-0182a1a49946","Type":"ContainerDied","Data":"fe08c810d96a8afe62aca260ff216aeb0bd8cbb3e9add8d23ff941fa3dc0512e"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.723404 4812 scope.go:117] "RemoveContainer" containerID="fe08c810d96a8afe62aca260ff216aeb0bd8cbb3e9add8d23ff941fa3dc0512e" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.725482 4812 generic.go:334] "Generic (PLEG): container finished" podID="48707b31-d8f9-4a7e-a8b9-2728249f0a49" containerID="3cf1764fe170a250162b704ab351d0ad53470fbf6aa1f28928a046ea271b57b0" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.725583 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" event={"ID":"48707b31-d8f9-4a7e-a8b9-2728249f0a49","Type":"ContainerDied","Data":"3cf1764fe170a250162b704ab351d0ad53470fbf6aa1f28928a046ea271b57b0"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.726389 4812 scope.go:117] "RemoveContainer" 
containerID="3cf1764fe170a250162b704ab351d0ad53470fbf6aa1f28928a046ea271b57b0" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.736834 4812 generic.go:334] "Generic (PLEG): container finished" podID="d53b5c25-d66b-46c5-80a5-998eb9007598" containerID="f9154542015193ff79bc777ab7bb0962aa009ab2cb2f293a239ff779bf185158" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.736915 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" event={"ID":"d53b5c25-d66b-46c5-80a5-998eb9007598","Type":"ContainerDied","Data":"f9154542015193ff79bc777ab7bb0962aa009ab2cb2f293a239ff779bf185158"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.737483 4812 scope.go:117] "RemoveContainer" containerID="f9154542015193ff79bc777ab7bb0962aa009ab2cb2f293a239ff779bf185158" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.741345 4812 generic.go:334] "Generic (PLEG): container finished" podID="4c649e41-10e8-4eee-bfc0-bf1a9409e421" containerID="77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.741725 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" event={"ID":"4c649e41-10e8-4eee-bfc0-bf1a9409e421","Type":"ContainerDied","Data":"77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.744714 4812 generic.go:334] "Generic (PLEG): container finished" podID="d6f00506-8ef7-46ec-9492-01e0005f90d3" containerID="8d0939ff71bec4d973344176bda28eb7c70a23fa0d4332785bbb86e17daec07a" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.744741 4812 scope.go:117] "RemoveContainer" containerID="77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.744760 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" event={"ID":"d6f00506-8ef7-46ec-9492-01e0005f90d3","Type":"ContainerDied","Data":"8d0939ff71bec4d973344176bda28eb7c70a23fa0d4332785bbb86e17daec07a"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.745110 4812 scope.go:117] "RemoveContainer" containerID="8d0939ff71bec4d973344176bda28eb7c70a23fa0d4332785bbb86e17daec07a" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.745191 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-6jwl5_openstack-operators(4c649e41-10e8-4eee-bfc0-bf1a9409e421)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" podUID="4c649e41-10e8-4eee-bfc0-bf1a9409e421" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.745310 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-vncgw_openstack-operators(d6f00506-8ef7-46ec-9492-01e0005f90d3)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" podUID="d6f00506-8ef7-46ec-9492-01e0005f90d3" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.750181 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" event={"ID":"13981b57-58d8-42f8-a833-f9760f06df20","Type":"ContainerStarted","Data":"f3d5c1f90a59cead34d00fdb061767ca18bfe3fbebaa4252fa65a537233fccd0"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.750295 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.752886 4812 generic.go:334] "Generic (PLEG): container finished" podID="bb01ec67-804d-4800-9ab4-e607563017b2" containerID="686a79e76b23ffc21086bfbd7749cdefa43a8a61ff9ee674b3064c65c9f2e5e4" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.752942 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" event={"ID":"bb01ec67-804d-4800-9ab4-e607563017b2","Type":"ContainerDied","Data":"686a79e76b23ffc21086bfbd7749cdefa43a8a61ff9ee674b3064c65c9f2e5e4"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.753313 4812 scope.go:117] "RemoveContainer" containerID="686a79e76b23ffc21086bfbd7749cdefa43a8a61ff9ee674b3064c65c9f2e5e4" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.759224 4812 generic.go:334] "Generic (PLEG): container finished" podID="2374c36a-5118-4a90-985c-1f80597d73af" containerID="fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.759306 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" event={"ID":"2374c36a-5118-4a90-985c-1f80597d73af","Type":"ContainerDied","Data":"fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.760102 4812 scope.go:117] "RemoveContainer" containerID="fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.760411 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7dd4g_openstack-operators(2374c36a-5118-4a90-985c-1f80597d73af)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" podUID="2374c36a-5118-4a90-985c-1f80597d73af" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.765228 4812 generic.go:334] "Generic (PLEG): container finished" podID="ab19d0cd-1e29-41af-892c-8f25f12b7f1c" containerID="576daabf7b43c05aeff710aea1f70add4856e5cfdbf21cbe8cb0fa3e3b092d5b" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.765290 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" event={"ID":"ab19d0cd-1e29-41af-892c-8f25f12b7f1c","Type":"ContainerDied","Data":"576daabf7b43c05aeff710aea1f70add4856e5cfdbf21cbe8cb0fa3e3b092d5b"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.765832 4812 scope.go:117] "RemoveContainer" containerID="576daabf7b43c05aeff710aea1f70add4856e5cfdbf21cbe8cb0fa3e3b092d5b" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.768647 4812 generic.go:334] "Generic (PLEG): container finished" podID="7f72311f-8622-43f6-b499-8b52318b0e2a" containerID="0e3913a87e20a122062170a91a948fa308b67df80b0b866a702874de57840085" exitCode=1 Nov 25 17:11:13 crc 
kubenswrapper[4812]: I1125 17:11:13.768677 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" event={"ID":"7f72311f-8622-43f6-b499-8b52318b0e2a","Type":"ContainerDied","Data":"0e3913a87e20a122062170a91a948fa308b67df80b0b866a702874de57840085"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.769393 4812 scope.go:117] "RemoveContainer" containerID="0e3913a87e20a122062170a91a948fa308b67df80b0b866a702874de57840085" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.769713 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-zzhb4_openstack-operators(7f72311f-8622-43f6-b499-8b52318b0e2a)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" podUID="7f72311f-8622-43f6-b499-8b52318b0e2a" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.773579 4812 generic.go:334] "Generic (PLEG): container finished" podID="10dee73c-a6d8-429d-b5c0-9226eec6d1f3" containerID="6b93e40e0946b4e1ee373e03b1f209c808b6cc86be77b380b3da4174a7b4149b" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.773640 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" event={"ID":"10dee73c-a6d8-429d-b5c0-9226eec6d1f3","Type":"ContainerDied","Data":"6b93e40e0946b4e1ee373e03b1f209c808b6cc86be77b380b3da4174a7b4149b"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.774283 4812 scope.go:117] "RemoveContainer" containerID="6b93e40e0946b4e1ee373e03b1f209c808b6cc86be77b380b3da4174a7b4149b" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.774632 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-2gwk8_openstack-operators(10dee73c-a6d8-429d-b5c0-9226eec6d1f3)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" podUID="10dee73c-a6d8-429d-b5c0-9226eec6d1f3" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.776818 4812 generic.go:334] "Generic (PLEG): container finished" podID="1e943cab-36af-421d-b7a4-24010912da99" containerID="a4ce16e6f8c8d3c5f3a6110935c634da778d04f55faa4605827a16968d764769" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.777108 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" event={"ID":"1e943cab-36af-421d-b7a4-24010912da99","Type":"ContainerDied","Data":"a4ce16e6f8c8d3c5f3a6110935c634da778d04f55faa4605827a16968d764769"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.777900 4812 scope.go:117] "RemoveContainer" containerID="a4ce16e6f8c8d3c5f3a6110935c634da778d04f55faa4605827a16968d764769" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.779649 4812 generic.go:334] "Generic (PLEG): container finished" podID="39d8b8c1-7015-487a-9263-25531a65c48c" containerID="3a4e07f58fa1c309f417fd4e1d09f6e3f4c296183b158bec11624c5828ad4bc5" exitCode=1 Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.779715 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" 
event={"ID":"39d8b8c1-7015-487a-9263-25531a65c48c","Type":"ContainerDied","Data":"3a4e07f58fa1c309f417fd4e1d09f6e3f4c296183b158bec11624c5828ad4bc5"} Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.780272 4812 scope.go:117] "RemoveContainer" containerID="3a4e07f58fa1c309f417fd4e1d09f6e3f4c296183b158bec11624c5828ad4bc5" Nov 25 17:11:13 crc kubenswrapper[4812]: E1125 17:11:13.781682 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jth28_openstack-operators(39d8b8c1-7015-487a-9263-25531a65c48c)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" podUID="39d8b8c1-7015-487a-9263-25531a65c48c" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.816569 4812 scope.go:117] "RemoveContainer" containerID="451f1386770552f65be8500268dacc98d675844b487465ac18bf1827f0b7b56a" Nov 25 17:11:13 crc kubenswrapper[4812]: I1125 17:11:13.952296 4812 scope.go:117] "RemoveContainer" containerID="d00cf2235f4744c58203a99d5605089a7e152ebeaa45e0f543846cd12189ea5e" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:13.995303 4812 scope.go:117] "RemoveContainer" containerID="04762816d14047ca3f7d0ca0fbd8f44c397c3905f2c884ae8fdc2d500a3f0bbc" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.058191 4812 scope.go:117] "RemoveContainer" containerID="0f80e5511d8dc1a0e4560528ad2a6eade741bdab9fcd954359b8192391929042" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.084643 4812 scope.go:117] "RemoveContainer" containerID="32cefc8394ca80e9b833f9439bdf862cf04c66049d85a0c77578bb82ad7e1848" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.122684 4812 scope.go:117] "RemoveContainer" containerID="a2441127312ad5277ba2486c2d0f087210176039960daaf41da269f95dbca100" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.152659 4812 scope.go:117] "RemoveContainer" containerID="3969c6027b71597f9d30beaa578c36aebed487523664a83323de6a99f3da11b9" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.201261 4812 scope.go:117] "RemoveContainer" containerID="a57d9f197214c8f522b46a33a7f8df91bce62137fe97771b61ffdd37fdc75b36" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.271378 4812 scope.go:117] "RemoveContainer" containerID="c497239c53b0bb0aea6346e5b2828b86590ebf4ea5affca580af0e62937a59b3" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.302899 4812 scope.go:117] "RemoveContainer" containerID="dc3206865b8b1fa593acbe457ead7ff70274aae9cbb488665ef47fce67e7ef95" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.427721 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="91bfadbe-a98d-49e4-88a9-97be162972a5" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.428495 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.430477 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"9276970c7d082980d12fd08e86ecc74f850d83c8eb74759a48e704f346e2c5cc"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.430731 4812 
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.791278 4812 generic.go:334] "Generic (PLEG): container finished" podID="1e943cab-36af-421d-b7a4-24010912da99" containerID="68de025290379e872cba6ed70e5d1c3d5de1edcc88586946094d5afd6b734b13" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.791335 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" event={"ID":"1e943cab-36af-421d-b7a4-24010912da99","Type":"ContainerDied","Data":"68de025290379e872cba6ed70e5d1c3d5de1edcc88586946094d5afd6b734b13"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.791366 4812 scope.go:117] "RemoveContainer" containerID="a4ce16e6f8c8d3c5f3a6110935c634da778d04f55faa4605827a16968d764769"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.792312 4812 scope.go:117] "RemoveContainer" containerID="68de025290379e872cba6ed70e5d1c3d5de1edcc88586946094d5afd6b734b13"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.792783 4812 generic.go:334] "Generic (PLEG): container finished" podID="b8169d76-3e78-4510-aa23-e8d733d495a2" containerID="20afee5fa98cb3825815039bb3bc7125d635c39d86cc179bcbd07bf4fc145d43" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.792831 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" event={"ID":"b8169d76-3e78-4510-aa23-e8d733d495a2","Type":"ContainerDied","Data":"20afee5fa98cb3825815039bb3bc7125d635c39d86cc179bcbd07bf4fc145d43"}
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.792920 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-mrlbz_openstack-operators(1e943cab-36af-421d-b7a4-24010912da99)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" podUID="1e943cab-36af-421d-b7a4-24010912da99"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.793508 4812 scope.go:117] "RemoveContainer" containerID="20afee5fa98cb3825815039bb3bc7125d635c39d86cc179bcbd07bf4fc145d43"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.797448 4812 generic.go:334] "Generic (PLEG): container finished" podID="ab19d0cd-1e29-41af-892c-8f25f12b7f1c" containerID="e9d2fc83a7593f7c1635ae3b7c566c94ee6d7a1f74f371ae9f41e742697c99dc" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.797621 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" event={"ID":"ab19d0cd-1e29-41af-892c-8f25f12b7f1c","Type":"ContainerDied","Data":"e9d2fc83a7593f7c1635ae3b7c566c94ee6d7a1f74f371ae9f41e742697c99dc"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.798653 4812 scope.go:117] "RemoveContainer" containerID="e9d2fc83a7593f7c1635ae3b7c566c94ee6d7a1f74f371ae9f41e742697c99dc"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.799048 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-bsfwl_openstack-operators(ab19d0cd-1e29-41af-892c-8f25f12b7f1c)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" podUID="ab19d0cd-1e29-41af-892c-8f25f12b7f1c"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.804144 4812 scope.go:117] "RemoveContainer" containerID="edcf94dff91b9e5dfdc5bece5f9e35765e41c9ed9efbfd4b60351cfd6b0e4444"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.804402 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-vzgsw_openstack-operators(98133284-26db-4073-a43c-f9572476153c)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.807398 4812 generic.go:334] "Generic (PLEG): container finished" podID="48707b31-d8f9-4a7e-a8b9-2728249f0a49" containerID="9b1184f1cf904337a82631fc47973387b0346ad5e9c2e95de6b8372d1a7db551" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.807552 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" event={"ID":"48707b31-d8f9-4a7e-a8b9-2728249f0a49","Type":"ContainerDied","Data":"9b1184f1cf904337a82631fc47973387b0346ad5e9c2e95de6b8372d1a7db551"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.808139 4812 scope.go:117] "RemoveContainer" containerID="9b1184f1cf904337a82631fc47973387b0346ad5e9c2e95de6b8372d1a7db551"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.808434 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-pljcc_openstack-operators(48707b31-d8f9-4a7e-a8b9-2728249f0a49)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" podUID="48707b31-d8f9-4a7e-a8b9-2728249f0a49"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.809684 4812 generic.go:334] "Generic (PLEG): container finished" podID="91bfadbe-a98d-49e4-88a9-97be162972a5" containerID="9276970c7d082980d12fd08e86ecc74f850d83c8eb74759a48e704f346e2c5cc" exitCode=2
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.809750 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"91bfadbe-a98d-49e4-88a9-97be162972a5","Type":"ContainerDied","Data":"9276970c7d082980d12fd08e86ecc74f850d83c8eb74759a48e704f346e2c5cc"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.812358 4812 scope.go:117] "RemoveContainer" containerID="3a4e07f58fa1c309f417fd4e1d09f6e3f4c296183b158bec11624c5828ad4bc5"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.812592 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jth28_openstack-operators(39d8b8c1-7015-487a-9263-25531a65c48c)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" podUID="39d8b8c1-7015-487a-9263-25531a65c48c"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.816108 4812 generic.go:334] "Generic (PLEG): container finished" podID="bb01ec67-804d-4800-9ab4-e607563017b2" containerID="ee242746ec3b71c79ddc4928f6ad59d8abf30e80b8e44dce44c22d89ecbf2698" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.816179 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" event={"ID":"bb01ec67-804d-4800-9ab4-e607563017b2","Type":"ContainerDied","Data":"ee242746ec3b71c79ddc4928f6ad59d8abf30e80b8e44dce44c22d89ecbf2698"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.816844 4812 scope.go:117] "RemoveContainer" containerID="ee242746ec3b71c79ddc4928f6ad59d8abf30e80b8e44dce44c22d89ecbf2698"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.817261 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-kx2fg_openstack-operators(bb01ec67-804d-4800-9ab4-e607563017b2)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.822136 4812 scope.go:117] "RemoveContainer" containerID="8eb28280138f0530faa5c201d867e1d77c3034ef7338e9d1c27321b4e1aacf4e"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.822379 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-858778c9dc-ncdvr_openstack-operators(16fc0b64-6599-4b8b-a0b7-b609dab9dd31)\"" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" podUID="16fc0b64-6599-4b8b-a0b7-b609dab9dd31"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.825907 4812 scope.go:117] "RemoveContainer" containerID="fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.826281 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7dd4g_openstack-operators(2374c36a-5118-4a90-985c-1f80597d73af)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" podUID="2374c36a-5118-4a90-985c-1f80597d73af"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.829935 4812 scope.go:117] "RemoveContainer" containerID="0e3913a87e20a122062170a91a948fa308b67df80b0b866a702874de57840085"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.830272 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-zzhb4_openstack-operators(7f72311f-8622-43f6-b499-8b52318b0e2a)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" podUID="7f72311f-8622-43f6-b499-8b52318b0e2a"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.830889 4812 generic.go:334] "Generic (PLEG): container finished" podID="f3785053-5fa1-43b6-86f7-0182a1a49946" containerID="63773267ebd34253f47b369dfc178e87cc4289935354908615eceb688e1bee2e" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.830958 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" event={"ID":"f3785053-5fa1-43b6-86f7-0182a1a49946","Type":"ContainerDied","Data":"63773267ebd34253f47b369dfc178e87cc4289935354908615eceb688e1bee2e"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.831253 4812 scope.go:117] "RemoveContainer" containerID="63773267ebd34253f47b369dfc178e87cc4289935354908615eceb688e1bee2e"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.831574 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-585789bb75-mft5q_openstack-operators(f3785053-5fa1-43b6-86f7-0182a1a49946)\"" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" podUID="f3785053-5fa1-43b6-86f7-0182a1a49946"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.834838 4812 scope.go:117] "RemoveContainer" containerID="6b93e40e0946b4e1ee373e03b1f209c808b6cc86be77b380b3da4174a7b4149b"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.835178 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-2gwk8_openstack-operators(10dee73c-a6d8-429d-b5c0-9226eec6d1f3)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" podUID="10dee73c-a6d8-429d-b5c0-9226eec6d1f3"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.838310 4812 scope.go:117] "RemoveContainer" containerID="b8442e1a39175832ba3feb326b882fbe2bb756a68c59c4d63fbbd625a9a0c3b8"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.838623 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-qn86p_openstack-operators(18418f15-9ec8-48df-a761-118f45058d06)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" podUID="18418f15-9ec8-48df-a761-118f45058d06"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.841828 4812 scope.go:117] "RemoveContainer" containerID="ff965109c7c78b9a06b587b6eb481aa6e13416cc40355af9d7bc1d217cf851c9"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.842139 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-h9l5r_openstack-operators(07907797-7edd-48e0-bb69-e42ad740f173)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" podUID="07907797-7edd-48e0-bb69-e42ad740f173"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.845369 4812 generic.go:334] "Generic (PLEG): container finished" podID="d53b5c25-d66b-46c5-80a5-998eb9007598" containerID="ea12be0fb67be05c0fb3a3a37cac5cbc679efaf06295f7c24c2aa14753bf9224" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.845449 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" event={"ID":"d53b5c25-d66b-46c5-80a5-998eb9007598","Type":"ContainerDied","Data":"ea12be0fb67be05c0fb3a3a37cac5cbc679efaf06295f7c24c2aa14753bf9224"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.846095 4812 scope.go:117] "RemoveContainer" containerID="ea12be0fb67be05c0fb3a3a37cac5cbc679efaf06295f7c24c2aa14753bf9224"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.846498 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-69b8c86946-ww5m5_openstack-operators(d53b5c25-d66b-46c5-80a5-998eb9007598)\"" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" podUID="d53b5c25-d66b-46c5-80a5-998eb9007598"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.847810 4812 scope.go:117] "RemoveContainer" containerID="8d0939ff71bec4d973344176bda28eb7c70a23fa0d4332785bbb86e17daec07a"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.848087 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-vncgw_openstack-operators(d6f00506-8ef7-46ec-9492-01e0005f90d3)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" podUID="d6f00506-8ef7-46ec-9492-01e0005f90d3"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.850618 4812 scope.go:117] "RemoveContainer" containerID="82949c4052896799150c9ef1b6118840c51e178295d92a96631f45c8a2eccd94"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.850935 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-jnszn_openstack-operators(36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" podUID="36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.851779 4812 generic.go:334] "Generic (PLEG): container finished" podID="15120e64-d800-43d8-b8c3-673e5854baef" containerID="f494fb07ebdc8dc1cc239fd2d67ed84cf5d854dc59436114131a94e47d2a56ef" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.851854 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" event={"ID":"15120e64-d800-43d8-b8c3-673e5854baef","Type":"ContainerDied","Data":"f494fb07ebdc8dc1cc239fd2d67ed84cf5d854dc59436114131a94e47d2a56ef"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.853285 4812 scope.go:117] "RemoveContainer" containerID="f494fb07ebdc8dc1cc239fd2d67ed84cf5d854dc59436114131a94e47d2a56ef"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.854414 4812 generic.go:334] "Generic (PLEG): container finished" podID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf" containerID="a36bab60c803b3f20aad316114a6690e56d66edd2b1bad28e3a95f718ad314bf" exitCode=1
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.854684 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" event={"ID":"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf","Type":"ContainerDied","Data":"a36bab60c803b3f20aad316114a6690e56d66edd2b1bad28e3a95f718ad314bf"}
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.855245 4812 scope.go:117] "RemoveContainer" containerID="a36bab60c803b3f20aad316114a6690e56d66edd2b1bad28e3a95f718ad314bf"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.859228 4812 scope.go:117] "RemoveContainer" containerID="77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4"
Nov 25 17:11:14 crc kubenswrapper[4812]: E1125 17:11:14.859572 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-6jwl5_openstack-operators(4c649e41-10e8-4eee-bfc0-bf1a9409e421)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" podUID="4c649e41-10e8-4eee-bfc0-bf1a9409e421"
Nov 25 17:11:14 crc kubenswrapper[4812]: I1125 17:11:14.928191 4812 scope.go:117] "RemoveContainer" containerID="576daabf7b43c05aeff710aea1f70add4856e5cfdbf21cbe8cb0fa3e3b092d5b"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.037818 4812 scope.go:117] "RemoveContainer" containerID="3cf1764fe170a250162b704ab351d0ad53470fbf6aa1f28928a046ea271b57b0"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.065659 4812 scope.go:117] "RemoveContainer" containerID="686a79e76b23ffc21086bfbd7749cdefa43a8a61ff9ee674b3064c65c9f2e5e4"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.097373 4812 scope.go:117] "RemoveContainer" containerID="fe08c810d96a8afe62aca260ff216aeb0bd8cbb3e9add8d23ff941fa3dc0512e"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.127457 4812 scope.go:117] "RemoveContainer" containerID="f9154542015193ff79bc777ab7bb0962aa009ab2cb2f293a239ff779bf185158"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.874369 4812 scope.go:117] "RemoveContainer" containerID="f793eb47f68a598ad5c2a833f25c61b0b0a48864de5c4e91b257317dbe7c3948"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.886399 4812 scope.go:117] "RemoveContainer" containerID="9b1184f1cf904337a82631fc47973387b0346ad5e9c2e95de6b8372d1a7db551"
Nov 25 17:11:15 crc kubenswrapper[4812]: E1125 17:11:15.886699 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-pljcc_openstack-operators(48707b31-d8f9-4a7e-a8b9-2728249f0a49)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" podUID="48707b31-d8f9-4a7e-a8b9-2728249f0a49"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.892881 4812 generic.go:334] "Generic (PLEG): container finished" podID="15120e64-d800-43d8-b8c3-673e5854baef" containerID="ae21c28290e91db5ab2a8cee0dcabe5e11851b94c6512e78f0c84a4fb8de75fb" exitCode=1
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.892944 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" event={"ID":"15120e64-d800-43d8-b8c3-673e5854baef","Type":"ContainerDied","Data":"ae21c28290e91db5ab2a8cee0dcabe5e11851b94c6512e78f0c84a4fb8de75fb"}
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.892976 4812 scope.go:117] "RemoveContainer" containerID="f494fb07ebdc8dc1cc239fd2d67ed84cf5d854dc59436114131a94e47d2a56ef"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.893775 4812 scope.go:117] "RemoveContainer" containerID="ae21c28290e91db5ab2a8cee0dcabe5e11851b94c6512e78f0c84a4fb8de75fb"
Nov 25 17:11:15 crc kubenswrapper[4812]: E1125 17:11:15.894223 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-6kqrr_openstack-operators(15120e64-d800-43d8-b8c3-673e5854baef)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" podUID="15120e64-d800-43d8-b8c3-673e5854baef"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.896731 4812 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="18c7bb9c-67c0-4564-a4a1-b49a0c047768"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.897930 4812 generic.go:334] "Generic (PLEG): container finished" podID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf" containerID="468b00f31e6fe2ed94df170c5ba250d570c46c542a573b01b2e33967b6148db5" exitCode=1
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.897998 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" event={"ID":"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf","Type":"ContainerDied","Data":"468b00f31e6fe2ed94df170c5ba250d570c46c542a573b01b2e33967b6148db5"}
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.898631 4812 scope.go:117] "RemoveContainer" containerID="468b00f31e6fe2ed94df170c5ba250d570c46c542a573b01b2e33967b6148db5"
Nov 25 17:11:15 crc kubenswrapper[4812]: E1125 17:11:15.898909 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-vnjdk_openstack-operators(4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" podUID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.900249 4812 generic.go:334] "Generic (PLEG): container finished" podID="91bfadbe-a98d-49e4-88a9-97be162972a5" containerID="eb78bdceb97f20a3c55cc12c342d5e4c9260659332ebd445ff2a0f4de7efd612" exitCode=1
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.900309 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"91bfadbe-a98d-49e4-88a9-97be162972a5","Type":"ContainerDied","Data":"eb78bdceb97f20a3c55cc12c342d5e4c9260659332ebd445ff2a0f4de7efd612"}
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.901078 4812 scope.go:117] "RemoveContainer" containerID="eb78bdceb97f20a3c55cc12c342d5e4c9260659332ebd445ff2a0f4de7efd612"
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.910737 4812 generic.go:334] "Generic (PLEG): container finished" podID="b8169d76-3e78-4510-aa23-e8d733d495a2" containerID="ae5aeb8183bcd8df53a3113e7b4bb442d72263eef27b41eb49e44b6868635389" exitCode=1
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.910826 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" event={"ID":"b8169d76-3e78-4510-aa23-e8d733d495a2","Type":"ContainerDied","Data":"ae5aeb8183bcd8df53a3113e7b4bb442d72263eef27b41eb49e44b6868635389"}
Nov 25 17:11:15 crc kubenswrapper[4812]: I1125 17:11:15.911588 4812 scope.go:117] "RemoveContainer" containerID="ae5aeb8183bcd8df53a3113e7b4bb442d72263eef27b41eb49e44b6868635389"
Nov 25 17:11:15 crc kubenswrapper[4812]: E1125 17:11:15.911911 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed
container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-sb4m4_openstack-operators(b8169d76-3e78-4510-aa23-e8d733d495a2)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" podUID="b8169d76-3e78-4510-aa23-e8d733d495a2" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.004425 4812 scope.go:117] "RemoveContainer" containerID="a36bab60c803b3f20aad316114a6690e56d66edd2b1bad28e3a95f718ad314bf" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.068048 4812 scope.go:117] "RemoveContainer" containerID="9276970c7d082980d12fd08e86ecc74f850d83c8eb74759a48e704f346e2c5cc" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.090993 4812 scope.go:117] "RemoveContainer" containerID="20afee5fa98cb3825815039bb3bc7125d635c39d86cc179bcbd07bf4fc145d43" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.394930 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.926968 4812 generic.go:334] "Generic (PLEG): container finished" podID="91bfadbe-a98d-49e4-88a9-97be162972a5" containerID="c59fbae8b28246efe42480c116562d22cc798259dc7368bdd00fc02a1da79561" exitCode=1 Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.927068 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"91bfadbe-a98d-49e4-88a9-97be162972a5","Type":"ContainerDied","Data":"c59fbae8b28246efe42480c116562d22cc798259dc7368bdd00fc02a1da79561"} Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.927360 4812 scope.go:117] "RemoveContainer" containerID="eb78bdceb97f20a3c55cc12c342d5e4c9260659332ebd445ff2a0f4de7efd612" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.927666 4812 scope.go:117] "RemoveContainer" containerID="c59fbae8b28246efe42480c116562d22cc798259dc7368bdd00fc02a1da79561" Nov 25 17:11:16 crc kubenswrapper[4812]: E1125 17:11:16.927982 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(91bfadbe-a98d-49e4-88a9-97be162972a5)\"" pod="openstack/kube-state-metrics-0" podUID="91bfadbe-a98d-49e4-88a9-97be162972a5" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.931100 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" event={"ID":"06fdd0d8-45b7-4787-9f77-24f76fccc672","Type":"ContainerDied","Data":"91928ed0030dbd69ea3befde813631b393d823ccd40abf3ad6b9f0e037314141"} Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.931948 4812 scope.go:117] "RemoveContainer" containerID="91928ed0030dbd69ea3befde813631b393d823ccd40abf3ad6b9f0e037314141" Nov 25 17:11:16 crc kubenswrapper[4812]: E1125 17:11:16.932158 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-6f7b877f74-qcc8n_metallb-system(06fdd0d8-45b7-4787-9f77-24f76fccc672)\"" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" Nov 25 17:11:16 crc kubenswrapper[4812]: I1125 17:11:16.936618 4812 generic.go:334] "Generic (PLEG): container finished" podID="06fdd0d8-45b7-4787-9f77-24f76fccc672" 
containerID="91928ed0030dbd69ea3befde813631b393d823ccd40abf3ad6b9f0e037314141" exitCode=1 Nov 25 17:11:17 crc kubenswrapper[4812]: I1125 17:11:17.038539 4812 scope.go:117] "RemoveContainer" containerID="f793eb47f68a598ad5c2a833f25c61b0b0a48864de5c4e91b257317dbe7c3948" Nov 25 17:11:17 crc kubenswrapper[4812]: I1125 17:11:17.224033 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:11:17 crc kubenswrapper[4812]: I1125 17:11:17.224485 4812 scope.go:117] "RemoveContainer" containerID="63773267ebd34253f47b369dfc178e87cc4289935354908615eceb688e1bee2e" Nov 25 17:11:17 crc kubenswrapper[4812]: E1125 17:11:17.224741 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-585789bb75-mft5q_openstack-operators(f3785053-5fa1-43b6-86f7-0182a1a49946)\"" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" podUID="f3785053-5fa1-43b6-86f7-0182a1a49946" Nov 25 17:11:17 crc kubenswrapper[4812]: I1125 17:11:17.949038 4812 scope.go:117] "RemoveContainer" containerID="c59fbae8b28246efe42480c116562d22cc798259dc7368bdd00fc02a1da79561" Nov 25 17:11:17 crc kubenswrapper[4812]: E1125 17:11:17.949580 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(91bfadbe-a98d-49e4-88a9-97be162972a5)\"" pod="openstack/kube-state-metrics-0" podUID="91bfadbe-a98d-49e4-88a9-97be162972a5" Nov 25 17:11:19 crc kubenswrapper[4812]: I1125 17:11:19.933469 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 17:11:19 crc kubenswrapper[4812]: I1125 17:11:19.942291 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 17:11:20 crc kubenswrapper[4812]: I1125 17:11:20.712406 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 17:11:20 crc kubenswrapper[4812]: I1125 17:11:20.720857 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 17:11:20 crc kubenswrapper[4812]: I1125 17:11:20.744170 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 17:11:20 crc kubenswrapper[4812]: I1125 17:11:20.883263 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.097168 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.257308 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.432559 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.444450 4812 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.456248 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.725171 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.738798 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-h428w" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.800225 4812 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.868112 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 17:11:21 crc kubenswrapper[4812]: I1125 17:11:21.884699 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.032361 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.049255 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.108129 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.225630 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.271684 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.331130 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.373149 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.442081 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.476633 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-f7vvj" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.508000 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.649731 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.650420 4812 scope.go:117] "RemoveContainer" containerID="f66145e7cdf5247b4b160714dafbbe9896c9c319b2c62d097bb4b8c2fcfcfa4c" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.694126 4812 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.694187 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.694797 4812 scope.go:117] "RemoveContainer" containerID="82949c4052896799150c9ef1b6118840c51e178295d92a96631f45c8a2eccd94" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.695010 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-jnszn_openstack-operators(36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" podUID="36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.732738 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.732963 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.733999 4812 scope.go:117] "RemoveContainer" containerID="fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.734419 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7dd4g_openstack-operators(2374c36a-5118-4a90-985c-1f80597d73af)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" podUID="2374c36a-5118-4a90-985c-1f80597d73af" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.739331 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.739379 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.740001 4812 scope.go:117] "RemoveContainer" containerID="ea12be0fb67be05c0fb3a3a37cac5cbc679efaf06295f7c24c2aa14753bf9224" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.740230 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-69b8c86946-ww5m5_openstack-operators(d53b5c25-d66b-46c5-80a5-998eb9007598)\"" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" podUID="d53b5c25-d66b-46c5-80a5-998eb9007598" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.766671 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.766708 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.767304 4812 scope.go:117] "RemoveContainer" containerID="0e3913a87e20a122062170a91a948fa308b67df80b0b866a702874de57840085" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.767551 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-zzhb4_openstack-operators(7f72311f-8622-43f6-b499-8b52318b0e2a)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" podUID="7f72311f-8622-43f6-b499-8b52318b0e2a" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.801956 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.802001 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.802707 4812 scope.go:117] "RemoveContainer" containerID="3a4e07f58fa1c309f417fd4e1d09f6e3f4c296183b158bec11624c5828ad4bc5" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.802955 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jth28_openstack-operators(39d8b8c1-7015-487a-9263-25531a65c48c)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" podUID="39d8b8c1-7015-487a-9263-25531a65c48c" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.813925 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.826741 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.832029 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.879730 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.879780 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.880450 4812 scope.go:117] "RemoveContainer" containerID="8eb28280138f0530faa5c201d867e1d77c3034ef7338e9d1c27321b4e1aacf4e" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.880736 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-858778c9dc-ncdvr_openstack-operators(16fc0b64-6599-4b8b-a0b7-b609dab9dd31)\"" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" podUID="16fc0b64-6599-4b8b-a0b7-b609dab9dd31" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.881376 4812 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.901880 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.901920 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.902806 4812 scope.go:117] "RemoveContainer" containerID="9b1184f1cf904337a82631fc47973387b0346ad5e9c2e95de6b8372d1a7db551" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.903309 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-pljcc_openstack-operators(48707b31-d8f9-4a7e-a8b9-2728249f0a49)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" podUID="48707b31-d8f9-4a7e-a8b9-2728249f0a49" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.941316 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.942237 4812 scope.go:117] "RemoveContainer" containerID="77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.942564 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-6jwl5_openstack-operators(4c649e41-10e8-4eee-bfc0-bf1a9409e421)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" podUID="4c649e41-10e8-4eee-bfc0-bf1a9409e421" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.942577 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.966676 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.992089 4812 generic.go:334] "Generic (PLEG): container finished" podID="bac38f31-ec39-46b9-9bac-2920864fb8a2" containerID="1edcc34c74f4e39ca91a1585a77f25d802e986fe8e0bb797c7672a09fae3d173" exitCode=1 Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.992164 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerDied","Data":"1edcc34c74f4e39ca91a1585a77f25d802e986fe8e0bb797c7672a09fae3d173"} Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.992247 4812 scope.go:117] "RemoveContainer" containerID="f66145e7cdf5247b4b160714dafbbe9896c9c319b2c62d097bb4b8c2fcfcfa4c" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.992699 4812 scope.go:117] "RemoveContainer" containerID="77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.992932 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-6jwl5_openstack-operators(4c649e41-10e8-4eee-bfc0-bf1a9409e421)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" podUID="4c649e41-10e8-4eee-bfc0-bf1a9409e421" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.993282 4812 scope.go:117] "RemoveContainer" containerID="1edcc34c74f4e39ca91a1585a77f25d802e986fe8e0bb797c7672a09fae3d173" Nov 25 17:11:22 crc kubenswrapper[4812]: I1125 17:11:22.993599 4812 scope.go:117] "RemoveContainer" containerID="fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.993746 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-clwgz_openstack-operators(bac38f31-ec39-46b9-9bac-2920864fb8a2)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" podUID="bac38f31-ec39-46b9-9bac-2920864fb8a2" Nov 25 17:11:22 crc kubenswrapper[4812]: E1125 17:11:22.994257 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7dd4g_openstack-operators(2374c36a-5118-4a90-985c-1f80597d73af)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" podUID="2374c36a-5118-4a90-985c-1f80597d73af" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.013913 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.013959 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.014696 4812 scope.go:117] "RemoveContainer" containerID="468b00f31e6fe2ed94df170c5ba250d570c46c542a573b01b2e33967b6148db5" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.014941 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-vnjdk_openstack-operators(4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" podUID="4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.052303 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.052376 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.053153 4812 scope.go:117] "RemoveContainer" containerID="b8442e1a39175832ba3feb326b882fbe2bb756a68c59c4d63fbbd625a9a0c3b8" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.053399 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-qn86p_openstack-operators(18418f15-9ec8-48df-a761-118f45058d06)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" podUID="18418f15-9ec8-48df-a761-118f45058d06" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.071464 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.071725 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.072455 4812 scope.go:117] "RemoveContainer" containerID="e9d2fc83a7593f7c1635ae3b7c566c94ee6d7a1f74f371ae9f41e742697c99dc" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.072786 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-bsfwl_openstack-operators(ab19d0cd-1e29-41af-892c-8f25f12b7f1c)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" podUID="ab19d0cd-1e29-41af-892c-8f25f12b7f1c" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.081939 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.081967 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.082408 4812 scope.go:117] "RemoveContainer" containerID="8d0939ff71bec4d973344176bda28eb7c70a23fa0d4332785bbb86e17daec07a" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.082629 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-vncgw_openstack-operators(d6f00506-8ef7-46ec-9492-01e0005f90d3)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" podUID="d6f00506-8ef7-46ec-9492-01e0005f90d3" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.105606 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.105650 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.106315 4812 scope.go:117] "RemoveContainer" containerID="ae21c28290e91db5ab2a8cee0dcabe5e11851b94c6512e78f0c84a4fb8de75fb" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.106611 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-6kqrr_openstack-operators(15120e64-d800-43d8-b8c3-673e5854baef)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" 
podUID="15120e64-d800-43d8-b8c3-673e5854baef" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.107482 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.131928 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.131978 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.132909 4812 scope.go:117] "RemoveContainer" containerID="68de025290379e872cba6ed70e5d1c3d5de1edcc88586946094d5afd6b734b13" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.133148 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-mrlbz_openstack-operators(1e943cab-36af-421d-b7a4-24010912da99)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" podUID="1e943cab-36af-421d-b7a4-24010912da99" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.233073 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.241624 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.241740 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.242318 4812 scope.go:117] "RemoveContainer" containerID="ee242746ec3b71c79ddc4928f6ad59d8abf30e80b8e44dce44c22d89ecbf2698" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.242667 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-kx2fg_openstack-operators(bb01ec67-804d-4800-9ab4-e607563017b2)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.246956 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.312655 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.312695 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.313298 4812 scope.go:117] "RemoveContainer" containerID="6b93e40e0946b4e1ee373e03b1f209c808b6cc86be77b380b3da4174a7b4149b" Nov 25 17:11:23 crc kubenswrapper[4812]: E1125 17:11:23.313512 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: 
\"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-2gwk8_openstack-operators(10dee73c-a6d8-429d-b5c0-9226eec6d1f3)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" podUID="10dee73c-a6d8-429d-b5c0-9226eec6d1f3" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.319808 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.326397 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.330086 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.331053 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.331446 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.332001 4812 scope.go:117] "RemoveContainer" containerID="ff965109c7c78b9a06b587b6eb481aa6e13416cc40355af9d7bc1d217cf851c9" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.344302 4812 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-hrdjk" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.346643 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-rggnk" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.365493 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.384279 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.440033 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.495960 4812 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.496060 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.526054 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-jnhr9" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.541574 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.549228 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.549280 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.549992 
4812 scope.go:117] "RemoveContainer" containerID="edcf94dff91b9e5dfdc5bece5f9e35765e41c9ed9efbfd4b60351cfd6b0e4444" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.552590 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.619107 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.724823 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.738938 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.763620 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.780515 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.790969 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.833795 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.872566 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.872767 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 17:11:23 crc kubenswrapper[4812]: I1125 17:11:23.927188 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.006923 4812 generic.go:334] "Generic (PLEG): container finished" podID="07907797-7edd-48e0-bb69-e42ad740f173" containerID="a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab" exitCode=1 Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.007003 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerDied","Data":"a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab"} Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.007040 4812 scope.go:117] "RemoveContainer" containerID="ff965109c7c78b9a06b587b6eb481aa6e13416cc40355af9d7bc1d217cf851c9" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.007597 4812 scope.go:117] "RemoveContainer" containerID="a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.007870 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-h9l5r_openstack-operators(07907797-7edd-48e0-bb69-e42ad740f173)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" 
podUID="07907797-7edd-48e0-bb69-e42ad740f173" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.011693 4812 generic.go:334] "Generic (PLEG): container finished" podID="98133284-26db-4073-a43c-f9572476153c" containerID="01d064eb252e3e08eaf974fd8346bdc265c9d1e86927bb4061b2b86cc0c59478" exitCode=1 Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.011928 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerDied","Data":"01d064eb252e3e08eaf974fd8346bdc265c9d1e86927bb4061b2b86cc0c59478"} Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.012188 4812 scope.go:117] "RemoveContainer" containerID="e9d2fc83a7593f7c1635ae3b7c566c94ee6d7a1f74f371ae9f41e742697c99dc" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.012456 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-bsfwl_openstack-operators(ab19d0cd-1e29-41af-892c-8f25f12b7f1c)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" podUID="ab19d0cd-1e29-41af-892c-8f25f12b7f1c" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.012525 4812 scope.go:117] "RemoveContainer" containerID="01d064eb252e3e08eaf974fd8346bdc265c9d1e86927bb4061b2b86cc0c59478" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.012763 4812 scope.go:117] "RemoveContainer" containerID="ee242746ec3b71c79ddc4928f6ad59d8abf30e80b8e44dce44c22d89ecbf2698" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.012831 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-vzgsw_openstack-operators(98133284-26db-4073-a43c-f9572476153c)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.012958 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-kx2fg_openstack-operators(bb01ec67-804d-4800-9ab4-e607563017b2)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" podUID="bb01ec67-804d-4800-9ab4-e607563017b2" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.013193 4812 scope.go:117] "RemoveContainer" containerID="ae21c28290e91db5ab2a8cee0dcabe5e11851b94c6512e78f0c84a4fb8de75fb" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.013405 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-6kqrr_openstack-operators(15120e64-d800-43d8-b8c3-673e5854baef)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" podUID="15120e64-d800-43d8-b8c3-673e5854baef" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.015499 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-2vmzh" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.051877 4812 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.075287 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.090665 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.098731 4812 scope.go:117] "RemoveContainer" containerID="edcf94dff91b9e5dfdc5bece5f9e35765e41c9ed9efbfd4b60351cfd6b0e4444" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.110965 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.210092 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.236890 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.255865 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.319484 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.328055 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.421091 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.421406 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.422207 4812 scope.go:117] "RemoveContainer" containerID="c59fbae8b28246efe42480c116562d22cc798259dc7368bdd00fc02a1da79561" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.422547 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(91bfadbe-a98d-49e4-88a9-97be162972a5)\"" pod="openstack/kube-state-metrics-0" podUID="91bfadbe-a98d-49e4-88a9-97be162972a5" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.464519 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.465201 4812 scope.go:117] "RemoveContainer" containerID="91928ed0030dbd69ea3befde813631b393d823ccd40abf3ad6b9f0e037314141" Nov 25 17:11:24 crc kubenswrapper[4812]: E1125 17:11:24.465456 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-6f7b877f74-qcc8n_metallb-system(06fdd0d8-45b7-4787-9f77-24f76fccc672)\"" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" podUID="06fdd0d8-45b7-4787-9f77-24f76fccc672" Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 
17:11:24.483300 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.520372 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.559246 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.566402 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.611501 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.651285 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.676660 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-2d2j9"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.793188 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.837556 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.851328 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 25 17:11:24 crc kubenswrapper[4812]: I1125 17:11:24.871456 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.021775 4812 scope.go:117] "RemoveContainer" containerID="a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab"
Nov 25 17:11:25 crc kubenswrapper[4812]: E1125 17:11:25.021997 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-h9l5r_openstack-operators(07907797-7edd-48e0-bb69-e42ad740f173)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" podUID="07907797-7edd-48e0-bb69-e42ad740f173"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.029542 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.102431 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.152905 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.174236 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.196205 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.202464 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.207963 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.214184 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.223522 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.244607 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-cz9nb"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.246142 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.264680 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.275210 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-dvhx5"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.299125 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.333877 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.393483 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.491496 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.553288 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-gnkrs"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.594874 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.609299 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-n4fjw"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.610590 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-sl8pv"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.633425 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.633840 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-55cd74b98b-lcn4x"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.682615 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.702542 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.730173 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-zw2zw"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.764596 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.787433 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.849397 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.873291 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.910394 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.938426 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.953791 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.956798 4812 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.967139 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.967193 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 17:11:25 crc kubenswrapper[4812]: I1125 17:11:25.972547 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.005368 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=17.005331574 podStartE2EDuration="17.005331574s" podCreationTimestamp="2025-11-25 17:11:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:11:26.000453863 +0000 UTC m=+1460.840595978" watchObservedRunningTime="2025-11-25 17:11:26.005331574 +0000 UTC m=+1460.845473669"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.021386 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.053221 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.066841 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.072899 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.088413 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.105232 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.142116 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.184099 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-hb42k"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.223868 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.374393 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-v44fh"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.375113 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.383557 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-qknbl"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.387199 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.419053 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.440231 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.477002 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.517812 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.522045 4812 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.559659 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.588907 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.593682 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-n2tk2"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.620031 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.677064 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.713702 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.743964 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.776576 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.779872 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.794873 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.813653 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.855180 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.880508 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 17:11:26 crc kubenswrapper[4812]: I1125 17:11:26.894621 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.033651 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.035457 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.040478 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.046454 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-fkdrg"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.059951 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.066894 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.074466 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.111557 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.128607 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.194098 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.218474 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.223620 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.224275 4812 scope.go:117] "RemoveContainer" containerID="63773267ebd34253f47b369dfc178e87cc4289935354908615eceb688e1bee2e"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.235709 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-k95ld"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.424940 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.492289 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-xmqng"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.521563 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.552727 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.576152 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.598200 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.637993 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.671046 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.728716 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.730144 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.783201 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.853293 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.867626 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.874721 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.909363 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.919127 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.972809 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 25 17:11:27 crc kubenswrapper[4812]: I1125 17:11:27.981600 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.074768 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" event={"ID":"f3785053-5fa1-43b6-86f7-0182a1a49946","Type":"ContainerStarted","Data":"2b07a75e6da1a8887a58061dff03764e4ddfd94be2fc085a38ed124d902b8482"}
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.074969 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.129992 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.130037 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.178312 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.179609 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.195869 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.238987 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.344461 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.353049 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.412253 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rs74b"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.435601 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.440594 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-z94w7"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.476615 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.481595 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.512370 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.533688 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.561303 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.573178 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.588956 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.614655 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-gr9tw"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.617715 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.623467 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.626727 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.667093 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.800107 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.817808 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.831372 4812 scope.go:117] "RemoveContainer" containerID="ae5aeb8183bcd8df53a3113e7b4bb442d72263eef27b41eb49e44b6868635389"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.838433 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.884498 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.896990 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 25 17:11:28 crc kubenswrapper[4812]: I1125 17:11:28.934028 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.003419 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.036717 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.042695 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.083482 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-sb4m4" event={"ID":"b8169d76-3e78-4510-aa23-e8d733d495a2","Type":"ContainerStarted","Data":"a6134c48535348f87ff1a2f85997a3a4f2714cf6fd0ed3c4d1488e90c53a0812"}
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.095858 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.151520 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-v2wvg"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.162198 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.162595 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.192210 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.233760 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.260418 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.272955 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.361142 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.440891 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.441789 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.443938 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.488967 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.496301 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.510905 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.533831 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.604924 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.687409 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.700299 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.719561 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.742137 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.746886 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.756770 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.811638 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.833898 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.845023 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.858496 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8gzzr"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.908247 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-4zdz8"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.912498 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-hgsrb"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.914209 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tvgf8"]
Nov 25 17:11:29 crc kubenswrapper[4812]: E1125 17:11:29.914795 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" containerName="installer"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.914821 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" containerName="installer"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.915050 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ce85523-4c30-4a7b-b909-3e8c257895ce" containerName="installer"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.916619 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.927366 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tvgf8"]
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.969229 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.988123 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.997444 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 25 17:11:29 crc kubenswrapper[4812]: I1125 17:11:29.997942 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.018089 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-catalog-content\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.018144 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-utilities\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.018180 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfg7c\" (UniqueName: \"kubernetes.io/projected/466d78b1-4962-4530-8f14-23f1077f5e37-kube-api-access-vfg7c\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.022651 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-wl74c"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.038620 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.041581 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.068606 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.100021 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.119588 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-catalog-content\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.119662 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-utilities\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.119721 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfg7c\" (UniqueName: \"kubernetes.io/projected/466d78b1-4962-4530-8f14-23f1077f5e37-kube-api-access-vfg7c\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.120280 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-utilities\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.120312 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-catalog-content\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.137873 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfg7c\" (UniqueName: \"kubernetes.io/projected/466d78b1-4962-4530-8f14-23f1077f5e37-kube-api-access-vfg7c\") pod \"certified-operators-tvgf8\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") " pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.139757 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.203436 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.222332 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.235898 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.240264 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.240413 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.263706 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-drzcc"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.270026 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.306952 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.313825 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.390601 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.423147 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.479781 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.488076 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.519658 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.614290 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-5k96m"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.614503 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-b29xg"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.615507 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.667557 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.680163 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.712243 4812 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.727090 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.732625 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.739617 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.742144 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.769781 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.776287 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tvgf8"]
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.795601 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.817146 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.838268 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.890430 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.891789 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.914885 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-2k824"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.924446 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.937313 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.947118 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.950356 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.970496 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.988270 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 25 17:11:30 crc kubenswrapper[4812]: I1125 17:11:30.997153 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.003121 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.018376 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.101084 4812 generic.go:334] "Generic (PLEG): container finished" podID="466d78b1-4962-4530-8f14-23f1077f5e37" containerID="14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414" exitCode=0
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.101126 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvgf8" event={"ID":"466d78b1-4962-4530-8f14-23f1077f5e37","Type":"ContainerDied","Data":"14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414"}
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.101151 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvgf8" event={"ID":"466d78b1-4962-4530-8f14-23f1077f5e37","Type":"ContainerStarted","Data":"f16331351a05fe3728cd77e6c47ef5e7b894caa5409cbcdabe02040aa1578ae9"}
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.205974 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.228051 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.315673 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.344790 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.377508 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-pft6m"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.417805 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.447256 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-wghrc"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.453801 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.483011 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.483267 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.490891 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.525271 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-s426j"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.529888 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.532989 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.586389 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.635942 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-h9qch"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.636073 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.636114 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-jfwbj"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.715422 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.716566 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.756446 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.774684 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.783397 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.784847 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-gdlbb"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.796665 4812 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.926864 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8wt7c"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.959494 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.991633 4812 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.991845 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://8ad5d89dd345e7340100873cdfdba709698c08cccb9980b09e786f24386e5949" gracePeriod=5
Nov 25 17:11:31 crc kubenswrapper[4812]: I1125 17:11:31.993467 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-6fm5m"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.011698 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.016780 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f8qkh"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.025554 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.116061 4812 generic.go:334] "Generic (PLEG): container finished" podID="466d78b1-4962-4530-8f14-23f1077f5e37" containerID="b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934" exitCode=0
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.116278 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvgf8" event={"ID":"466d78b1-4962-4530-8f14-23f1077f5e37","Type":"ContainerDied","Data":"b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934"}
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.118850 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.226248 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.237700 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.267180 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.316989 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.344730 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.385361 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.385587 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.450852 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.493152 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.539579 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.637726 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.656945 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.658012 4812 scope.go:117] "RemoveContainer" containerID="1edcc34c74f4e39ca91a1585a77f25d802e986fe8e0bb797c7672a09fae3d173"
Nov 25 17:11:32 crc kubenswrapper[4812]: E1125 17:11:32.658397 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-clwgz_openstack-operators(bac38f31-ec39-46b9-9bac-2920864fb8a2)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" podUID="bac38f31-ec39-46b9-9bac-2920864fb8a2"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.683962 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.716913 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.737040 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.737749 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.764849 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.788720 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.871143 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 25 17:11:32 crc kubenswrapper[4812]: I1125 17:11:32.872313 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-4dkvl"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.048470 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.127121 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvgf8" event={"ID":"466d78b1-4962-4530-8f14-23f1077f5e37","Type":"ContainerStarted","Data":"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508"}
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.162053 4812 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-n6zbp"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.163804 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tvgf8" podStartSLOduration=2.715774173 podStartE2EDuration="4.163786955s" podCreationTimestamp="2025-11-25 17:11:29 +0000 UTC" firstStartedPulling="2025-11-25 17:11:31.10296712 +0000 UTC m=+1465.943109225" lastFinishedPulling="2025-11-25 17:11:32.550979912 +0000 UTC m=+1467.391122007" observedRunningTime="2025-11-25 17:11:33.150923898 +0000 UTC m=+1467.991065993" watchObservedRunningTime="2025-11-25 17:11:33.163786955 +0000 UTC m=+1468.003929050"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.172556 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-llvr2"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.176161 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.232871 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.249628 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.309109 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.324932 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.331131 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.331898 4812 scope.go:117] "RemoveContainer" containerID="a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab"
Nov 25 17:11:33 crc kubenswrapper[4812]: E1125 17:11:33.332158 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-h9l5r_openstack-operators(07907797-7edd-48e0-bb69-e42ad740f173)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" podUID="07907797-7edd-48e0-bb69-e42ad740f173"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.379703 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.405199 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.405404 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-shqwq"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.424261 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.525020 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-jlhx7"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.528974 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.530112 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.548223 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.548821 4812 scope.go:117] "RemoveContainer" containerID="01d064eb252e3e08eaf974fd8346bdc265c9d1e86927bb4061b2b86cc0c59478"
Nov 25 17:11:33 crc kubenswrapper[4812]: E1125 17:11:33.549065 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-vzgsw_openstack-operators(98133284-26db-4073-a43c-f9572476153c)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.552272 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.677154 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.686798 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.727072 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.761608 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.831711 4812 scope.go:117] "RemoveContainer" containerID="0e3913a87e20a122062170a91a948fa308b67df80b0b866a702874de57840085"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.913391 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.962924 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.976933 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 17:11:33 crc kubenswrapper[4812]: I1125 17:11:33.998215 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.022494 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.030818 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.056507 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.062245 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.127126 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.138958 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" event={"ID":"7f72311f-8622-43f6-b499-8b52318b0e2a","Type":"ContainerStarted","Data":"d5ab4fd00400f6ecd263522819bb3741b4913fdbb7f75d101854c29e6bbd462b"}
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.139731 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.170980 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.172386 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.262570 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-h2ltc"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.361574 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.380074 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.437129 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.494218 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.559037 4812 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-cqj5l"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.566127 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.674699 4812 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-6z7sv"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.706075 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.725618 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.733554 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.794245 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.829677 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.831726 4812 scope.go:117] "RemoveContainer" containerID="8eb28280138f0530faa5c201d867e1d77c3034ef7338e9d1c27321b4e1aacf4e"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.831846 4812 scope.go:117] "RemoveContainer" containerID="b8442e1a39175832ba3feb326b882fbe2bb756a68c59c4d63fbbd625a9a0c3b8"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.832310 4812 scope.go:117] "RemoveContainer" containerID="6b93e40e0946b4e1ee373e03b1f209c808b6cc86be77b380b3da4174a7b4149b"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.832998 4812 scope.go:117] "RemoveContainer" containerID="e9d2fc83a7593f7c1635ae3b7c566c94ee6d7a1f74f371ae9f41e742697c99dc"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.834480 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 25 17:11:34 crc kubenswrapper[4812]: I1125 17:11:34.867089 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.003465 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.033721 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.126587 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-8vt9v"
Nov 25 17:11:35
crc kubenswrapper[4812]: I1125 17:11:35.179132 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.192511 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.300116 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8hgbb" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.306028 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.484476 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.502587 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.502723 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.520076 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.576680 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-sl66b" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.614703 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.796658 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.848518 4812 scope.go:117] "RemoveContainer" containerID="ee242746ec3b71c79ddc4928f6ad59d8abf30e80b8e44dce44c22d89ecbf2698" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.849635 4812 scope.go:117] "RemoveContainer" containerID="8d0939ff71bec4d973344176bda28eb7c70a23fa0d4332785bbb86e17daec07a" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.860764 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.865786 4812 scope.go:117] "RemoveContainer" containerID="9b1184f1cf904337a82631fc47973387b0346ad5e9c2e95de6b8372d1a7db551" Nov 25 17:11:35 crc kubenswrapper[4812]: I1125 17:11:35.920912 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-8n4tq" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.061164 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.154225 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.163857 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" 
event={"ID":"d6f00506-8ef7-46ec-9492-01e0005f90d3","Type":"ContainerStarted","Data":"26951728bd22ddf9128ef9b390a2a48dc2b79a815d052295941f0b49d8908c74"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.164180 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.169892 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" event={"ID":"bb01ec67-804d-4800-9ab4-e607563017b2","Type":"ContainerStarted","Data":"e7389c297d8d47fad0aaf43d7ff0a032be53ae4cc96aa221847814df41562ce6"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.170287 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.172812 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" event={"ID":"16fc0b64-6599-4b8b-a0b7-b609dab9dd31","Type":"ContainerStarted","Data":"d0904e0c127b398a2026bb3b369f9781438e6c19edb0f9db0dd02d4d62420141"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.173072 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.174781 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" event={"ID":"18418f15-9ec8-48df-a761-118f45058d06","Type":"ContainerStarted","Data":"1e6fc2f4ce3568ba9385406591e5015c3d226f204d7399b020fde84df46fc091"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.175237 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.176961 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" event={"ID":"ab19d0cd-1e29-41af-892c-8f25f12b7f1c","Type":"ContainerStarted","Data":"8320931eec2afe6e64ba9fdd5d9933663952285804e1d0cb067ed91ab7c5aabe"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.177466 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.179449 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" event={"ID":"48707b31-d8f9-4a7e-a8b9-2728249f0a49","Type":"ContainerStarted","Data":"71a31dadbb18bfff79412fb5c94240042f132ca14d1249b5139cf5f3a2dc2b90"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.180104 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.182436 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" event={"ID":"10dee73c-a6d8-429d-b5c0-9226eec6d1f3","Type":"ContainerStarted","Data":"9cb253fca8b10a1183ff12bb4a2884ad9659eb6e102e9291027885095bcd256d"} Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 
17:11:36.182748 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.258119 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.286274 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.319603 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.350416 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.425078 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.443687 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.475576 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.526296 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.565317 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.593098 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-lp5kf" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.608394 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.681119 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-x26zf" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.832289 4812 scope.go:117] "RemoveContainer" containerID="68de025290379e872cba6ed70e5d1c3d5de1edcc88586946094d5afd6b734b13" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.832364 4812 scope.go:117] "RemoveContainer" containerID="ea12be0fb67be05c0fb3a3a37cac5cbc679efaf06295f7c24c2aa14753bf9224" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.832412 4812 scope.go:117] "RemoveContainer" containerID="77492562f647df5b5e627db1c659255130e1fdab509b60836078f25baf5c86b4" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.832450 4812 scope.go:117] "RemoveContainer" containerID="82949c4052896799150c9ef1b6118840c51e178295d92a96631f45c8a2eccd94" Nov 25 17:11:36 crc kubenswrapper[4812]: I1125 17:11:36.832594 4812 scope.go:117] "RemoveContainer" containerID="468b00f31e6fe2ed94df170c5ba250d570c46c542a573b01b2e33967b6148db5" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.029884 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.096096 4812 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.170392 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.194854 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.194901 4812 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="8ad5d89dd345e7340100873cdfdba709698c08cccb9980b09e786f24386e5949" exitCode=137 Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.195567 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="528fcfdc592abd14a869d24dbc54d717a558314f2d4409c186e54ac3484bb644" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.201464 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.227448 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.227620 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.229985 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-585789bb75-mft5q" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.351106 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.351185 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.351204 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.351257 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.351325 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 
17:11:37.351940 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.352304 4812 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.352632 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.352668 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.352685 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.382098 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.414244 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.454621 4812 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.454662 4812 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.454674 4812 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.454682 4812 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.478952 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.598997 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.618808 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.832252 4812 scope.go:117] "RemoveContainer" containerID="fbb500c641b5cc074f3df777c793bf8e0e6b1cb01d65ac36e8ba24399c112100" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.832311 4812 scope.go:117] "RemoveContainer" containerID="3a4e07f58fa1c309f417fd4e1d09f6e3f4c296183b158bec11624c5828ad4bc5" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.832891 4812 scope.go:117] "RemoveContainer" containerID="c59fbae8b28246efe42480c116562d22cc798259dc7368bdd00fc02a1da79561" Nov 25 17:11:37 crc kubenswrapper[4812]: I1125 17:11:37.844422 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.063774 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.208208 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" event={"ID":"4c649e41-10e8-4eee-bfc0-bf1a9409e421","Type":"ContainerStarted","Data":"f763e6def53ef6680fdd37c2b3963ac8806cbdf4f139d2c478c02c8b6c11bc82"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.208421 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.212275 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" event={"ID":"1e943cab-36af-421d-b7a4-24010912da99","Type":"ContainerStarted","Data":"44b6603f283697bed90d345413bc900786687753071fdd09a085f4609a2cafc6"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.212638 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.216307 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" event={"ID":"39d8b8c1-7015-487a-9263-25531a65c48c","Type":"ContainerStarted","Data":"9255a62a3efbdde12a5b446a6bb3d19d29d1f7ce0e7e2a185ea79c23f2189f7a"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.216688 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.219331 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" event={"ID":"2374c36a-5118-4a90-985c-1f80597d73af","Type":"ContainerStarted","Data":"41761b50358d2f82bc024d91642648774634763a99f4ebd40795a466547cbcae"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.219566 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.225847 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" event={"ID":"36e2e4fb-0369-4347-b8e6-3f4bb2e6f88b","Type":"ContainerStarted","Data":"8c8b93f886a457c440fc8fa0fdf53f472899334242dcf79078abae333ab6e132"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.226091 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.234168 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" event={"ID":"4709cc72-b3b5-4bf6-ac84-45cc1f6bc2bf","Type":"ContainerStarted","Data":"90a922e35c1b4a0937e74ace9c9da9d54b2fca62a1e87d2a085c020941b151cb"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.235013 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.244900 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.245626 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" event={"ID":"d53b5c25-d66b-46c5-80a5-998eb9007598","Type":"ContainerStarted","Data":"40de332f9b7b8f3eeec82bea0ace0bcc408c4d94ccb2ece754d857c0cac492cd"} Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.246234 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.248142 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.360247 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.831663 4812 scope.go:117] "RemoveContainer" containerID="ae21c28290e91db5ab2a8cee0dcabe5e11851b94c6512e78f0c84a4fb8de75fb" Nov 25 17:11:38 crc kubenswrapper[4812]: I1125 17:11:38.832049 4812 scope.go:117] "RemoveContainer" containerID="91928ed0030dbd69ea3befde813631b393d823ccd40abf3ad6b9f0e037314141" Nov 25 17:11:39 crc kubenswrapper[4812]: I1125 17:11:39.255749 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" event={"ID":"06fdd0d8-45b7-4787-9f77-24f76fccc672","Type":"ContainerStarted","Data":"d919c698e03ba4bb96af46234066ee12860c2e6d0b0d8fe6ec0153119d522719"} Nov 25 17:11:39 crc kubenswrapper[4812]: I1125 17:11:39.256308 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n" Nov 25 17:11:39 crc kubenswrapper[4812]: I1125 17:11:39.258091 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" event={"ID":"15120e64-d800-43d8-b8c3-673e5854baef","Type":"ContainerStarted","Data":"a6146f6242e4bc9bcc8ee91953519d15c013ab76301dce75f08e208a05c822bd"} Nov 25 17:11:39 crc kubenswrapper[4812]: I1125 17:11:39.258548 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:11:39 crc kubenswrapper[4812]: I1125 17:11:39.261154 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"91bfadbe-a98d-49e4-88a9-97be162972a5","Type":"ContainerStarted","Data":"c7fc4a1d486bba2be7fb7c74315caecb8ef986b30e8dee8cc015c4972f176737"} Nov 25 17:11:40 crc kubenswrapper[4812]: I1125 17:11:40.270266 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tvgf8" Nov 25 17:11:40 crc kubenswrapper[4812]: I1125 17:11:40.270898 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tvgf8" Nov 25 17:11:40 crc kubenswrapper[4812]: I1125 17:11:40.327639 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tvgf8" Nov 25 17:11:41 crc kubenswrapper[4812]: I1125 17:11:41.334243 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-tvgf8" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.649670 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.651198 4812 scope.go:117] "RemoveContainer" containerID="1edcc34c74f4e39ca91a1585a77f25d802e986fe8e0bb797c7672a09fae3d173" Nov 25 17:11:42 crc kubenswrapper[4812]: E1125 17:11:42.651545 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-clwgz_openstack-operators(bac38f31-ec39-46b9-9bac-2920864fb8a2)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" podUID="bac38f31-ec39-46b9-9bac-2920864fb8a2" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.698345 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-jnszn" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.743308 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7dd4g" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.743758 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-69b8c86946-ww5m5" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.769593 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-zzhb4" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.808885 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jth28" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.884362 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-858778c9dc-ncdvr" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.907145 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-pljcc" Nov 25 17:11:42 crc kubenswrapper[4812]: I1125 17:11:42.944642 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-6jwl5" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.018651 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-vnjdk" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.053898 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-qn86p" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.085028 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-bsfwl" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.089625 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vncgw" Nov 25 17:11:43 crc 
kubenswrapper[4812]: I1125 17:11:43.135803 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-mrlbz" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.243628 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kx2fg" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.314808 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-2gwk8" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.337015 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.338012 4812 scope.go:117] "RemoveContainer" containerID="a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab" Nov 25 17:11:43 crc kubenswrapper[4812]: E1125 17:11:43.338247 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-h9l5r_openstack-operators(07907797-7edd-48e0-bb69-e42ad740f173)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" podUID="07907797-7edd-48e0-bb69-e42ad740f173" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.549120 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:11:43 crc kubenswrapper[4812]: I1125 17:11:43.550010 4812 scope.go:117] "RemoveContainer" containerID="01d064eb252e3e08eaf974fd8346bdc265c9d1e86927bb4061b2b86cc0c59478" Nov 25 17:11:43 crc kubenswrapper[4812]: E1125 17:11:43.550241 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-vzgsw_openstack-operators(98133284-26db-4073-a43c-f9572476153c)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" podUID="98133284-26db-4073-a43c-f9572476153c" Nov 25 17:11:44 crc kubenswrapper[4812]: I1125 17:11:44.421204 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 17:11:44 crc kubenswrapper[4812]: I1125 17:11:44.429322 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 17:11:52 crc kubenswrapper[4812]: I1125 17:11:52.350650 4812 scope.go:117] "RemoveContainer" containerID="0f8b2d066c37f9772f302e94c59a96d2b7e66380be7993bc1fa455b1fc8b9489" Nov 25 17:11:52 crc kubenswrapper[4812]: I1125 17:11:52.373650 4812 scope.go:117] "RemoveContainer" containerID="f8cfea7e90d42777c0a3c20bc2730ed7edd94a5c45e3615d2ee25735785cb7dc" Nov 25 17:11:52 crc kubenswrapper[4812]: I1125 17:11:52.392901 4812 scope.go:117] "RemoveContainer" containerID="9b2c13eb71d02f3604a231521a62f15a9121d3ba9aeb0af084eeaac920eccf6b" Nov 25 17:11:52 crc kubenswrapper[4812]: I1125 17:11:52.435367 4812 scope.go:117] "RemoveContainer" containerID="4eb57618a7cddfb38b8cade847a6ed872d79c39bed36ed8694d9559a2bde3b39" Nov 25 17:11:53 crc kubenswrapper[4812]: I1125 17:11:53.107894 4812 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-6kqrr" Nov 25 17:11:55 crc kubenswrapper[4812]: I1125 17:11:55.838599 4812 scope.go:117] "RemoveContainer" containerID="01d064eb252e3e08eaf974fd8346bdc265c9d1e86927bb4061b2b86cc0c59478" Nov 25 17:11:56 crc kubenswrapper[4812]: I1125 17:11:56.433810 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" event={"ID":"98133284-26db-4073-a43c-f9572476153c","Type":"ContainerStarted","Data":"647b007229fdf706a3d3f13ee7f1526ed79343e9f6ac8cb35f401a0a96e73792"} Nov 25 17:11:56 crc kubenswrapper[4812]: I1125 17:11:56.434254 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw" Nov 25 17:11:56 crc kubenswrapper[4812]: I1125 17:11:56.831491 4812 scope.go:117] "RemoveContainer" containerID="a77c0821038f28cdde6745abbd5de4bebce582ca274a475e132734ea20dd6eab" Nov 25 17:11:57 crc kubenswrapper[4812]: I1125 17:11:57.444621 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" event={"ID":"07907797-7edd-48e0-bb69-e42ad740f173","Type":"ContainerStarted","Data":"240b06706cc6776836cbdafcd3c93a3ebe7f80086b2335ba3e2646778a3ee036"} Nov 25 17:11:57 crc kubenswrapper[4812]: I1125 17:11:57.445368 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r" Nov 25 17:11:57 crc kubenswrapper[4812]: I1125 17:11:57.832645 4812 scope.go:117] "RemoveContainer" containerID="1edcc34c74f4e39ca91a1585a77f25d802e986fe8e0bb797c7672a09fae3d173" Nov 25 17:11:58 crc kubenswrapper[4812]: I1125 17:11:58.454796 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" event={"ID":"bac38f31-ec39-46b9-9bac-2920864fb8a2","Type":"ContainerStarted","Data":"7148183682dba24fd996318aa762226b28ecaaa06662863b5c9bd29de4218e85"} Nov 25 17:11:58 crc kubenswrapper[4812]: I1125 17:11:58.455325 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.334688 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sbl8g"] Nov 25 17:11:59 crc kubenswrapper[4812]: E1125 17:11:59.335553 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.335572 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.335741 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.336932 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.348450 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sbl8g"] Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.522007 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-catalog-content\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.522063 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-utilities\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.522227 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5vl4\" (UniqueName: \"kubernetes.io/projected/ba51f635-a14b-4763-b313-fcd9a3792c21-kube-api-access-j5vl4\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.624133 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5vl4\" (UniqueName: \"kubernetes.io/projected/ba51f635-a14b-4763-b313-fcd9a3792c21-kube-api-access-j5vl4\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.624307 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-catalog-content\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.624342 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-utilities\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.624805 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-catalog-content\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.624841 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-utilities\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.643778 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j5vl4\" (UniqueName: \"kubernetes.io/projected/ba51f635-a14b-4763-b313-fcd9a3792c21-kube-api-access-j5vl4\") pod \"certified-operators-sbl8g\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") " pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.712970 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.718287 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s4zqk"] Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.720174 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.740455 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s4zqk"] Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.828449 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-catalog-content\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.829139 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx7kg\" (UniqueName: \"kubernetes.io/projected/17239e6b-3f83-49f2-8ac8-3e710ef1565b-kube-api-access-xx7kg\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.829387 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-utilities\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.937435 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-utilities\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.937642 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-catalog-content\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.937753 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx7kg\" (UniqueName: \"kubernetes.io/projected/17239e6b-3f83-49f2-8ac8-3e710ef1565b-kube-api-access-xx7kg\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.939616 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-utilities\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:11:59 crc kubenswrapper[4812]: I1125 17:11:59.962436 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-catalog-content\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:00 crc kubenswrapper[4812]: I1125 17:12:00.003566 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx7kg\" (UniqueName: \"kubernetes.io/projected/17239e6b-3f83-49f2-8ac8-3e710ef1565b-kube-api-access-xx7kg\") pod \"certified-operators-s4zqk\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") " pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:00 crc kubenswrapper[4812]: I1125 17:12:00.156911 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:00 crc kubenswrapper[4812]: I1125 17:12:00.338663 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sbl8g"]
Nov 25 17:12:00 crc kubenswrapper[4812]: I1125 17:12:00.479943 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerStarted","Data":"12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4"}
Nov 25 17:12:00 crc kubenswrapper[4812]: I1125 17:12:00.479992 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerStarted","Data":"72e21980c1a1f73ac92b68ba6814c0febc86cfd97391c8702b0e27a81715ac53"}
Nov 25 17:12:00 crc kubenswrapper[4812]: I1125 17:12:00.617952 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s4zqk"]
Nov 25 17:12:00 crc kubenswrapper[4812]: W1125 17:12:00.622176 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17239e6b_3f83_49f2_8ac8_3e710ef1565b.slice/crio-604c863b59d77f9a11f7f3c0ddc2d349ec619d182978b64c8b5bb25916db8532 WatchSource:0}: Error finding container 604c863b59d77f9a11f7f3c0ddc2d349ec619d182978b64c8b5bb25916db8532: Status 404 returned error can't find the container with id 604c863b59d77f9a11f7f3c0ddc2d349ec619d182978b64c8b5bb25916db8532
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.119857 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hs28w"]
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.121767 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.128737 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hs28w"]
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.258711 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-catalog-content\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.258754 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjvv6\" (UniqueName: \"kubernetes.io/projected/9d47079d-27b3-446c-b58a-e30d00537660-kube-api-access-zjvv6\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.258821 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-utilities\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.360132 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-catalog-content\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.360425 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjvv6\" (UniqueName: \"kubernetes.io/projected/9d47079d-27b3-446c-b58a-e30d00537660-kube-api-access-zjvv6\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.360621 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-utilities\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.360801 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-catalog-content\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.361134 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-utilities\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.382273 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjvv6\" (UniqueName: \"kubernetes.io/projected/9d47079d-27b3-446c-b58a-e30d00537660-kube-api-access-zjvv6\") pod \"certified-operators-hs28w\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") " pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.448968 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.526581 4812 generic.go:334] "Generic (PLEG): container finished" podID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerID="c40af37826d4abdf914c10e6548f94c69232fb8a87c8b7c6f3157e5103ce1c58" exitCode=0
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.526881 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerDied","Data":"c40af37826d4abdf914c10e6548f94c69232fb8a87c8b7c6f3157e5103ce1c58"}
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.526912 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerStarted","Data":"604c863b59d77f9a11f7f3c0ddc2d349ec619d182978b64c8b5bb25916db8532"}
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.534439 4812 generic.go:334] "Generic (PLEG): container finished" podID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerID="12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4" exitCode=0
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.534477 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerDied","Data":"12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4"}
Nov 25 17:12:01 crc kubenswrapper[4812]: I1125 17:12:01.928454 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hs28w"]
Nov 25 17:12:02 crc kubenswrapper[4812]: I1125 17:12:02.545873 4812 generic.go:334] "Generic (PLEG): container finished" podID="9d47079d-27b3-446c-b58a-e30d00537660" containerID="ef403abc05d52f2703347ad12fae894223e75cc38c893f95c840dc1e71bc26c7" exitCode=0
Nov 25 17:12:02 crc kubenswrapper[4812]: I1125 17:12:02.546182 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerDied","Data":"ef403abc05d52f2703347ad12fae894223e75cc38c893f95c840dc1e71bc26c7"}
Nov 25 17:12:02 crc kubenswrapper[4812]: I1125 17:12:02.546216 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerStarted","Data":"ebb5465e71553b44b42276c942478d9efcf686c2ca13fb09e8170e75a5b8c901"}
Nov 25 17:12:02 crc kubenswrapper[4812]: I1125 17:12:02.565579 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerStarted","Data":"a5a9e7bdff8e1f5d378038365d19484a1acd020cf1387be7213038aff1760d9f"}
Nov 25 17:12:02 crc kubenswrapper[4812]: I1125 17:12:02.576099 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerStarted","Data":"b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5"}
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.333621 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-h9l5r"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.525583 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z72wj"]
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.528116 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.538831 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z72wj"]
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.551352 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-vzgsw"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.602916 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-utilities\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.603047 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzg9t\" (UniqueName: \"kubernetes.io/projected/b1f3d194-6274-433e-a9b2-136a62808c4b-kube-api-access-zzg9t\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.603119 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-catalog-content\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.606316 4812 generic.go:334] "Generic (PLEG): container finished" podID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerID="b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5" exitCode=0
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.606362 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerDied","Data":"b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5"}
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.622295 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerStarted","Data":"a40a83599b05e5314d61d4b8124a8d61a47a20957d0944a3a18f631838db4e42"}
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.628608 4812 generic.go:334] "Generic (PLEG): container finished" podID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerID="a5a9e7bdff8e1f5d378038365d19484a1acd020cf1387be7213038aff1760d9f" exitCode=0
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.628681 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerDied","Data":"a5a9e7bdff8e1f5d378038365d19484a1acd020cf1387be7213038aff1760d9f"}
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.704669 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-utilities\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.704828 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzg9t\" (UniqueName: \"kubernetes.io/projected/b1f3d194-6274-433e-a9b2-136a62808c4b-kube-api-access-zzg9t\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.704916 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-catalog-content\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.705397 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-catalog-content\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.706995 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-utilities\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.726420 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzg9t\" (UniqueName: \"kubernetes.io/projected/b1f3d194-6274-433e-a9b2-136a62808c4b-kube-api-access-zzg9t\") pod \"community-operators-z72wj\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") " pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.746868 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pczt2"]
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.749101 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.760840 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pczt2"]
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.848246 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.918677 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-catalog-content\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.919129 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5wz4\" (UniqueName: \"kubernetes.io/projected/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-kube-api-access-k5wz4\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:03 crc kubenswrapper[4812]: I1125 17:12:03.919197 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-utilities\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.020559 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-catalog-content\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.020669 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5wz4\" (UniqueName: \"kubernetes.io/projected/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-kube-api-access-k5wz4\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.020721 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-utilities\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.021203 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-utilities\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.021546 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-catalog-content\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.041179 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5wz4\" (UniqueName: \"kubernetes.io/projected/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-kube-api-access-k5wz4\") pod \"redhat-marketplace-pczt2\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") " pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.117999 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.303401 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z72wj"]
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.637026 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z72wj" event={"ID":"b1f3d194-6274-433e-a9b2-136a62808c4b","Type":"ContainerStarted","Data":"43d4de6a4f7a2548a41bc5cd4b388ec663d3625fd00c4d632184eb36685d0987"}
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.640149 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerStarted","Data":"1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483"}
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.642023 4812 generic.go:334] "Generic (PLEG): container finished" podID="9d47079d-27b3-446c-b58a-e30d00537660" containerID="a40a83599b05e5314d61d4b8124a8d61a47a20957d0944a3a18f631838db4e42" exitCode=0
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.642067 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerDied","Data":"a40a83599b05e5314d61d4b8124a8d61a47a20957d0944a3a18f631838db4e42"}
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.678483 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sbl8g" podStartSLOduration=3.226402373 podStartE2EDuration="5.678460626s" podCreationTimestamp="2025-11-25 17:11:59 +0000 UTC" firstStartedPulling="2025-11-25 17:12:01.537426786 +0000 UTC m=+1496.377568901" lastFinishedPulling="2025-11-25 17:12:03.989485059 +0000 UTC m=+1498.829627154" observedRunningTime="2025-11-25 17:12:04.661324764 +0000 UTC m=+1499.501466879" watchObservedRunningTime="2025-11-25 17:12:04.678460626 +0000 UTC m=+1499.518602721"
Nov 25 17:12:04 crc kubenswrapper[4812]: I1125 17:12:04.720199 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pczt2"]
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.651993 4812 generic.go:334] "Generic (PLEG): container finished" podID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerID="69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f" exitCode=0
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.652289 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pczt2" event={"ID":"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef","Type":"ContainerDied","Data":"69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f"}
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.652315 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pczt2" event={"ID":"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef","Type":"ContainerStarted","Data":"46b0bc6eea7b5420040bc769105c3b72baf3443800768f4ba533e0e1075abd49"}
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.657249 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerStarted","Data":"af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888"}
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.665319 4812 generic.go:334] "Generic (PLEG): container finished" podID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerID="42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d" exitCode=0
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.665642 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z72wj" event={"ID":"b1f3d194-6274-433e-a9b2-136a62808c4b","Type":"ContainerDied","Data":"42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d"}
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.672033 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerStarted","Data":"2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0"}
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.703148 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s4zqk" podStartSLOduration=3.881777333 podStartE2EDuration="6.703116914s" podCreationTimestamp="2025-11-25 17:11:59 +0000 UTC" firstStartedPulling="2025-11-25 17:12:01.531113225 +0000 UTC m=+1496.371255320" lastFinishedPulling="2025-11-25 17:12:04.352452806 +0000 UTC m=+1499.192594901" observedRunningTime="2025-11-25 17:12:05.697984785 +0000 UTC m=+1500.538126890" watchObservedRunningTime="2025-11-25 17:12:05.703116914 +0000 UTC m=+1500.543259009"
Nov 25 17:12:05 crc kubenswrapper[4812]: I1125 17:12:05.784461 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hs28w" podStartSLOduration=2.148429462 podStartE2EDuration="4.784441746s" podCreationTimestamp="2025-11-25 17:12:01 +0000 UTC" firstStartedPulling="2025-11-25 17:12:02.552046122 +0000 UTC m=+1497.392188217" lastFinishedPulling="2025-11-25 17:12:05.188058406 +0000 UTC m=+1500.028200501" observedRunningTime="2025-11-25 17:12:05.777417037 +0000 UTC m=+1500.617559132" watchObservedRunningTime="2025-11-25 17:12:05.784441746 +0000 UTC m=+1500.624583841"
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.701230 4812 generic.go:334] "Generic (PLEG): container finished" podID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerID="799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62" exitCode=0
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.701294 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pczt2" event={"ID":"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef","Type":"ContainerDied","Data":"799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62"}
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.705709 4812 generic.go:334] "Generic (PLEG): container finished" podID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerID="31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7" exitCode=0
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.705747 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z72wj" event={"ID":"b1f3d194-6274-433e-a9b2-136a62808c4b","Type":"ContainerDied","Data":"31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7"}
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.920432 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bpqbb"]
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.922304 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.937894 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bpqbb"]
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.999577 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-utilities\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.999636 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-catalog-content\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:07 crc kubenswrapper[4812]: I1125 17:12:07.999786 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ccpr\" (UniqueName: \"kubernetes.io/projected/7e1417f9-84df-4f66-878f-5b70e2c90cf1-kube-api-access-7ccpr\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.102024 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ccpr\" (UniqueName: \"kubernetes.io/projected/7e1417f9-84df-4f66-878f-5b70e2c90cf1-kube-api-access-7ccpr\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.102113 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-utilities\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.102148 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-catalog-content\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.102794 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-utilities\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.102815 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-catalog-content\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.135042 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ccpr\" (UniqueName: \"kubernetes.io/projected/7e1417f9-84df-4f66-878f-5b70e2c90cf1-kube-api-access-7ccpr\") pod \"redhat-operators-bpqbb\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.254134 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpqbb"
Nov 25 17:12:08 crc kubenswrapper[4812]: W1125 17:12:08.832103 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e1417f9_84df_4f66_878f_5b70e2c90cf1.slice/crio-3cd5bb1f1d3f35002025caa858d2e3ba0e5931936071a0111a781617a8742e56 WatchSource:0}: Error finding container 3cd5bb1f1d3f35002025caa858d2e3ba0e5931936071a0111a781617a8742e56: Status 404 returned error can't find the container with id 3cd5bb1f1d3f35002025caa858d2e3ba0e5931936071a0111a781617a8742e56
Nov 25 17:12:08 crc kubenswrapper[4812]: I1125 17:12:08.842612 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bpqbb"]
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.713587 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sbl8g"
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.713997 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sbl8g"
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.737506 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pczt2" event={"ID":"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef","Type":"ContainerStarted","Data":"206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b"}
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.740209 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerStarted","Data":"3cd5bb1f1d3f35002025caa858d2e3ba0e5931936071a0111a781617a8742e56"}
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.772072 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pczt2" podStartSLOduration=3.912333819 podStartE2EDuration="6.772049003s" podCreationTimestamp="2025-11-25 17:12:03 +0000 UTC" firstStartedPulling="2025-11-25 17:12:05.653740443 +0000 UTC m=+1500.493882538" lastFinishedPulling="2025-11-25 17:12:08.513455617 +0000 UTC m=+1503.353597722" observedRunningTime="2025-11-25 17:12:09.765763823 +0000 UTC m=+1504.605905918" watchObservedRunningTime="2025-11-25 17:12:09.772049003 +0000 UTC m=+1504.612191098"
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.777001 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sbl8g"
Nov 25 17:12:09 crc kubenswrapper[4812]: I1125 17:12:09.841609 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sbl8g"
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.160704 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.160771 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.230490 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.754662 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z72wj" event={"ID":"b1f3d194-6274-433e-a9b2-136a62808c4b","Type":"ContainerStarted","Data":"e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b"}
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.757238 4812 generic.go:334] "Generic (PLEG): container finished" podID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerID="aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d" exitCode=0
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.757309 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerDied","Data":"aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d"}
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.822601 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z72wj" podStartSLOduration=4.622935847 podStartE2EDuration="7.822582698s" podCreationTimestamp="2025-11-25 17:12:03 +0000 UTC" firstStartedPulling="2025-11-25 17:12:05.667619806 +0000 UTC m=+1500.507761901" lastFinishedPulling="2025-11-25 17:12:08.867266657 +0000 UTC m=+1503.707408752" observedRunningTime="2025-11-25 17:12:10.790472942 +0000 UTC m=+1505.630615047" watchObservedRunningTime="2025-11-25 17:12:10.822582698 +0000 UTC m=+1505.662724793"
Nov 25 17:12:10 crc kubenswrapper[4812]: I1125 17:12:10.829994 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:11 crc kubenswrapper[4812]: I1125 17:12:11.449693 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:11 crc kubenswrapper[4812]: I1125 17:12:11.450991 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:11 crc kubenswrapper[4812]: I1125 17:12:11.510986 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:11 crc kubenswrapper[4812]: I1125 17:12:11.843482 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:12 crc kubenswrapper[4812]: I1125 17:12:12.651656 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-clwgz"
Nov 25 17:12:13 crc kubenswrapper[4812]: I1125 17:12:13.848745 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:13 crc kubenswrapper[4812]: I1125 17:12:13.849369 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:13 crc kubenswrapper[4812]: I1125 17:12:13.924643 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:14 crc kubenswrapper[4812]: I1125 17:12:14.119617 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:14 crc kubenswrapper[4812]: I1125 17:12:14.119660 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:14 crc kubenswrapper[4812]: I1125 17:12:14.179878 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:14 crc kubenswrapper[4812]: I1125 17:12:14.467736 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f7b877f74-qcc8n"
Nov 25 17:12:14 crc kubenswrapper[4812]: I1125 17:12:14.915653 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:14 crc kubenswrapper[4812]: I1125 17:12:14.922080 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:16 crc kubenswrapper[4812]: I1125 17:12:16.819621 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerStarted","Data":"6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e"}
Nov 25 17:12:18 crc kubenswrapper[4812]: I1125 17:12:18.714874 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hs28w"]
Nov 25 17:12:18 crc kubenswrapper[4812]: I1125 17:12:18.715435 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hs28w" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="registry-server" containerID="cri-o://2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0" gracePeriod=2
Nov 25 17:12:19 crc kubenswrapper[4812]: I1125 17:12:19.314133 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s4zqk"]
Nov 25 17:12:19 crc kubenswrapper[4812]: I1125 17:12:19.314376 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s4zqk" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="registry-server" containerID="cri-o://af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888" gracePeriod=2
Nov 25 17:12:19 crc kubenswrapper[4812]: I1125 17:12:19.912563 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sbl8g"]
Nov 25 17:12:19 crc kubenswrapper[4812]: I1125 17:12:19.913229 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sbl8g" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="registry-server" containerID="cri-o://1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483" gracePeriod=2
Nov 25 17:12:20 crc kubenswrapper[4812]: E1125 17:12:20.158703 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888 is running failed: container process not found" containerID="af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:20 crc kubenswrapper[4812]: E1125 17:12:20.159167 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888 is running failed: container process not found" containerID="af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:20 crc kubenswrapper[4812]: E1125 17:12:20.159705 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888 is running failed: container process not found" containerID="af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:20 crc kubenswrapper[4812]: E1125 17:12:20.159778 4812 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-s4zqk" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="registry-server"
Nov 25 17:12:20 crc kubenswrapper[4812]: I1125 17:12:20.517085 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tvgf8"]
Nov 25 17:12:20 crc kubenswrapper[4812]: I1125 17:12:20.517329 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tvgf8" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="registry-server" containerID="cri-o://f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508" gracePeriod=2
Nov 25 17:12:21 crc kubenswrapper[4812]: I1125 17:12:21.317201 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z72wj"]
Nov 25 17:12:21 crc kubenswrapper[4812]: I1125 17:12:21.317917 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z72wj" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="registry-server" containerID="cri-o://e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" gracePeriod=2
Nov 25 17:12:21 crc kubenswrapper[4812]: E1125 17:12:21.450221 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0 is running failed: container process not found" containerID="2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:21 crc kubenswrapper[4812]: E1125 17:12:21.450554 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0 is running failed: container process not found" containerID="2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:21 crc kubenswrapper[4812]: E1125 17:12:21.450998 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0 is running failed: container process not found" containerID="2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:21 crc kubenswrapper[4812]: E1125 17:12:21.451032 4812 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-hs28w" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="registry-server"
Nov 25 17:12:23 crc kubenswrapper[4812]: E1125 17:12:23.849996 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b is running failed: container process not found" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:23 crc kubenswrapper[4812]: E1125 17:12:23.850979 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b is running failed: container process not found" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:23 crc kubenswrapper[4812]: E1125 17:12:23.851324 4812 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b is running failed: container process not found" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 17:12:23 crc kubenswrapper[4812]: E1125 17:12:23.851353 4812 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-z72wj" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="registry-server"
Nov 25 17:12:24 crc kubenswrapper[4812]: I1125 17:12:24.317279 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pczt2"]
Nov 25 17:12:24 crc kubenswrapper[4812]: I1125 17:12:24.317630 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pczt2" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="registry-server" containerID="cri-o://206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b" gracePeriod=2
Nov 25 17:12:27 crc kubenswrapper[4812]: I1125 17:12:27.627156 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s4zqk_17239e6b-3f83-49f2-8ac8-3e710ef1565b/registry-server/0.log"
Nov 25 17:12:27 crc kubenswrapper[4812]: I1125 17:12:27.630070 4812 generic.go:334] "Generic (PLEG): container finished" podID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerID="af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888" exitCode=-1
Nov 25 17:12:27 crc kubenswrapper[4812]: I1125 17:12:27.630137 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerDied","Data":"af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888"}
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.366399 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z72wj_b1f3d194-6274-433e-a9b2-136a62808c4b/registry-server/0.log"
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.367772 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z72wj"
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.451256 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-utilities\") pod \"b1f3d194-6274-433e-a9b2-136a62808c4b\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") "
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.451406 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzg9t\" (UniqueName: \"kubernetes.io/projected/b1f3d194-6274-433e-a9b2-136a62808c4b-kube-api-access-zzg9t\") pod \"b1f3d194-6274-433e-a9b2-136a62808c4b\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") "
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.451482 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-catalog-content\") pod \"b1f3d194-6274-433e-a9b2-136a62808c4b\" (UID: \"b1f3d194-6274-433e-a9b2-136a62808c4b\") "
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.452011 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-utilities" (OuterVolumeSpecName: "utilities") pod "b1f3d194-6274-433e-a9b2-136a62808c4b" (UID: "b1f3d194-6274-433e-a9b2-136a62808c4b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.459025 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1f3d194-6274-433e-a9b2-136a62808c4b-kube-api-access-zzg9t" (OuterVolumeSpecName: "kube-api-access-zzg9t") pod "b1f3d194-6274-433e-a9b2-136a62808c4b" (UID: "b1f3d194-6274-433e-a9b2-136a62808c4b"). InnerVolumeSpecName "kube-api-access-zzg9t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.553261 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:29 crc kubenswrapper[4812]: I1125 17:12:29.553292 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzg9t\" (UniqueName: \"kubernetes.io/projected/b1f3d194-6274-433e-a9b2-136a62808c4b-kube-api-access-zzg9t\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.237757 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pczt2"
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.297121 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-utilities\") pod \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") "
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.297362 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5wz4\" (UniqueName: \"kubernetes.io/projected/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-kube-api-access-k5wz4\") pod \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") "
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.297406 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-catalog-content\") pod \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\" (UID: \"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef\") "
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.302150 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-utilities" (OuterVolumeSpecName: "utilities") pod "e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" (UID: "e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.316048 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-kube-api-access-k5wz4" (OuterVolumeSpecName: "kube-api-access-k5wz4") pod "e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" (UID: "e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef"). InnerVolumeSpecName "kube-api-access-k5wz4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.334851 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" (UID: "e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.399697 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.399728 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5wz4\" (UniqueName: \"kubernetes.io/projected/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-kube-api-access-k5wz4\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:30 crc kubenswrapper[4812]: I1125 17:12:30.399739 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:31 crc kubenswrapper[4812]: I1125 17:12:31.367329 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-hs28w_9d47079d-27b3-446c-b58a-e30d00537660/registry-server/0.log"
Nov 25 17:12:31 crc kubenswrapper[4812]: I1125 17:12:31.369269 4812 generic.go:334] "Generic (PLEG): container finished" podID="9d47079d-27b3-446c-b58a-e30d00537660" containerID="2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0" exitCode=-1
Nov 25 17:12:31 crc kubenswrapper[4812]: I1125 17:12:31.369295 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerDied","Data":"2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0"}
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.385777 4812 generic.go:334] "Generic (PLEG): container finished" podID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerID="6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e" exitCode=0
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.385894 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerDied","Data":"6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e"}
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.826857 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s4zqk_17239e6b-3f83-49f2-8ac8-3e710ef1565b/registry-server/0.log"
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.827768 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s4zqk"
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.839285 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-hs28w_9d47079d-27b3-446c-b58a-e30d00537660/registry-server/0.log"
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.840719 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hs28w"
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.979557 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-utilities\") pod \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") "
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.979666 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-catalog-content\") pod \"9d47079d-27b3-446c-b58a-e30d00537660\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") "
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.979750 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjvv6\" (UniqueName: \"kubernetes.io/projected/9d47079d-27b3-446c-b58a-e30d00537660-kube-api-access-zjvv6\") pod \"9d47079d-27b3-446c-b58a-e30d00537660\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") "
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.979809 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-utilities\") pod \"9d47079d-27b3-446c-b58a-e30d00537660\" (UID: \"9d47079d-27b3-446c-b58a-e30d00537660\") "
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.979861 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx7kg\" (UniqueName: \"kubernetes.io/projected/17239e6b-3f83-49f2-8ac8-3e710ef1565b-kube-api-access-xx7kg\") pod \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") "
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.979981 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-catalog-content\") pod \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\" (UID: \"17239e6b-3f83-49f2-8ac8-3e710ef1565b\") "
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.980062 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-utilities" (OuterVolumeSpecName: "utilities") pod "17239e6b-3f83-49f2-8ac8-3e710ef1565b" (UID: "17239e6b-3f83-49f2-8ac8-3e710ef1565b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.980503 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.980506 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-utilities" (OuterVolumeSpecName: "utilities") pod "9d47079d-27b3-446c-b58a-e30d00537660" (UID: "9d47079d-27b3-446c-b58a-e30d00537660"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.986775 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17239e6b-3f83-49f2-8ac8-3e710ef1565b-kube-api-access-xx7kg" (OuterVolumeSpecName: "kube-api-access-xx7kg") pod "17239e6b-3f83-49f2-8ac8-3e710ef1565b" (UID: "17239e6b-3f83-49f2-8ac8-3e710ef1565b"). InnerVolumeSpecName "kube-api-access-xx7kg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:12:32 crc kubenswrapper[4812]: I1125 17:12:32.997353 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d47079d-27b3-446c-b58a-e30d00537660-kube-api-access-zjvv6" (OuterVolumeSpecName: "kube-api-access-zjvv6") pod "9d47079d-27b3-446c-b58a-e30d00537660" (UID: "9d47079d-27b3-446c-b58a-e30d00537660"). InnerVolumeSpecName "kube-api-access-zjvv6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.083504 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjvv6\" (UniqueName: \"kubernetes.io/projected/9d47079d-27b3-446c-b58a-e30d00537660-kube-api-access-zjvv6\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.083634 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.083714 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx7kg\" (UniqueName: \"kubernetes.io/projected/17239e6b-3f83-49f2-8ac8-3e710ef1565b-kube-api-access-xx7kg\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.083603 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-tvgf8_466d78b1-4962-4530-8f14-23f1077f5e37/registry-server/0.log"
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.084722 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvgf8"
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.087848 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbl8g_ba51f635-a14b-4763-b313-fcd9a3792c21/registry-server/0.log"
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.088737 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sbl8g"
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.183978 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-catalog-content\") pod \"466d78b1-4962-4530-8f14-23f1077f5e37\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") "
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184034 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-utilities\") pod \"466d78b1-4962-4530-8f14-23f1077f5e37\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") "
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184061 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5vl4\" (UniqueName: \"kubernetes.io/projected/ba51f635-a14b-4763-b313-fcd9a3792c21-kube-api-access-j5vl4\") pod \"ba51f635-a14b-4763-b313-fcd9a3792c21\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") "
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184101 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfg7c\" (UniqueName: \"kubernetes.io/projected/466d78b1-4962-4530-8f14-23f1077f5e37-kube-api-access-vfg7c\") pod \"466d78b1-4962-4530-8f14-23f1077f5e37\" (UID: \"466d78b1-4962-4530-8f14-23f1077f5e37\") "
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184118 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-catalog-content\") pod \"ba51f635-a14b-4763-b313-fcd9a3792c21\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") "
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184151 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-utilities\") pod \"ba51f635-a14b-4763-b313-fcd9a3792c21\" (UID: \"ba51f635-a14b-4763-b313-fcd9a3792c21\") "
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184507 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-utilities" (OuterVolumeSpecName: "utilities") pod "466d78b1-4962-4530-8f14-23f1077f5e37" (UID: "466d78b1-4962-4530-8f14-23f1077f5e37"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.184731 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-utilities" (OuterVolumeSpecName: "utilities") pod "ba51f635-a14b-4763-b313-fcd9a3792c21" (UID: "ba51f635-a14b-4763-b313-fcd9a3792c21"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.187937 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/466d78b1-4962-4530-8f14-23f1077f5e37-kube-api-access-vfg7c" (OuterVolumeSpecName: "kube-api-access-vfg7c") pod "466d78b1-4962-4530-8f14-23f1077f5e37" (UID: "466d78b1-4962-4530-8f14-23f1077f5e37"). InnerVolumeSpecName "kube-api-access-vfg7c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.191933 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba51f635-a14b-4763-b313-fcd9a3792c21-kube-api-access-j5vl4" (OuterVolumeSpecName: "kube-api-access-j5vl4") pod "ba51f635-a14b-4763-b313-fcd9a3792c21" (UID: "ba51f635-a14b-4763-b313-fcd9a3792c21"). InnerVolumeSpecName "kube-api-access-j5vl4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.286950 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.287386 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5vl4\" (UniqueName: \"kubernetes.io/projected/ba51f635-a14b-4763-b313-fcd9a3792c21-kube-api-access-j5vl4\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.287415 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfg7c\" (UniqueName: \"kubernetes.io/projected/466d78b1-4962-4530-8f14-23f1077f5e37-kube-api-access-vfg7c\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.287438 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.532092 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "466d78b1-4962-4530-8f14-23f1077f5e37" (UID: "466d78b1-4962-4530-8f14-23f1077f5e37"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.591993 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/466d78b1-4962-4530-8f14-23f1077f5e37-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.707415 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "17239e6b-3f83-49f2-8ac8-3e710ef1565b" (UID: "17239e6b-3f83-49f2-8ac8-3e710ef1565b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.757546 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba51f635-a14b-4763-b313-fcd9a3792c21" (UID: "ba51f635-a14b-4763-b313-fcd9a3792c21"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.794507 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba51f635-a14b-4763-b313-fcd9a3792c21-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.794559 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17239e6b-3f83-49f2-8ac8-3e710ef1565b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.813545 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d47079d-27b3-446c-b58a-e30d00537660" (UID: "9d47079d-27b3-446c-b58a-e30d00537660"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.897928 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d47079d-27b3-446c-b58a-e30d00537660-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:33 crc kubenswrapper[4812]: I1125 17:12:33.947863 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b1f3d194-6274-433e-a9b2-136a62808c4b" (UID: "b1f3d194-6274-433e-a9b2-136a62808c4b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:12:34 crc kubenswrapper[4812]: I1125 17:12:34.001422 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1f3d194-6274-433e-a9b2-136a62808c4b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.124928 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-hs28w_9d47079d-27b3-446c-b58a-e30d00537660/registry-server/0.log" Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.126641 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hs28w" event={"ID":"9d47079d-27b3-446c-b58a-e30d00537660","Type":"ContainerDied","Data":"ebb5465e71553b44b42276c942478d9efcf686c2ca13fb09e8170e75a5b8c901"} Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.126704 4812 scope.go:117] "RemoveContainer" containerID="2ae9dd929bef99ac31a90900963e6b94e7144ca7e223ba33175fa15985a87ae0" Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.131284 4812 generic.go:334] "Generic (PLEG): container finished" podID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerID="206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b" exitCode=0 Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.131322 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pczt2" event={"ID":"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef","Type":"ContainerDied","Data":"206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b"} Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.131361 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pczt2" Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.131368 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pczt2" event={"ID":"e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef","Type":"ContainerDied","Data":"46b0bc6eea7b5420040bc769105c3b72baf3443800768f4ba533e0e1075abd49"} Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.153784 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pczt2"] Nov 25 17:12:36 crc kubenswrapper[4812]: I1125 17:12:36.164171 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pczt2"] Nov 25 17:12:37 crc kubenswrapper[4812]: I1125 17:12:37.841203 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" path="/var/lib/kubelet/pods/e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef/volumes" Nov 25 17:12:39 crc kubenswrapper[4812]: I1125 17:12:39.869444 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-s4zqk_17239e6b-3f83-49f2-8ac8-3e710ef1565b/registry-server/0.log" Nov 25 17:12:39 crc kubenswrapper[4812]: I1125 17:12:39.870980 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s4zqk" event={"ID":"17239e6b-3f83-49f2-8ac8-3e710ef1565b","Type":"ContainerDied","Data":"604c863b59d77f9a11f7f3c0ddc2d349ec619d182978b64c8b5bb25916db8532"} Nov 25 17:12:39 crc kubenswrapper[4812]: I1125 17:12:39.871021 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s4zqk" Nov 25 17:12:39 crc kubenswrapper[4812]: I1125 17:12:39.899287 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s4zqk"] Nov 25 17:12:39 crc kubenswrapper[4812]: I1125 17:12:39.925826 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s4zqk"] Nov 25 17:12:41 crc kubenswrapper[4812]: I1125 17:12:41.840776 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" path="/var/lib/kubelet/pods/17239e6b-3f83-49f2-8ac8-3e710ef1565b/volumes" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.606312 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-tvgf8_466d78b1-4962-4530-8f14-23f1077f5e37/registry-server/0.log" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.612212 4812 generic.go:334] "Generic (PLEG): container finished" podID="466d78b1-4962-4530-8f14-23f1077f5e37" containerID="f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508" exitCode=-1 Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.612298 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvgf8" event={"ID":"466d78b1-4962-4530-8f14-23f1077f5e37","Type":"ContainerDied","Data":"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508"} Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.612331 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvgf8" event={"ID":"466d78b1-4962-4530-8f14-23f1077f5e37","Type":"ContainerDied","Data":"f16331351a05fe3728cd77e6c47ef5e7b894caa5409cbcdabe02040aa1578ae9"} Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.612441 4812 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvgf8" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.614146 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-z72wj_b1f3d194-6274-433e-a9b2-136a62808c4b/registry-server/0.log" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.614592 4812 generic.go:334] "Generic (PLEG): container finished" podID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" exitCode=137 Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.614627 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z72wj" event={"ID":"b1f3d194-6274-433e-a9b2-136a62808c4b","Type":"ContainerDied","Data":"e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b"} Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.614643 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z72wj" event={"ID":"b1f3d194-6274-433e-a9b2-136a62808c4b","Type":"ContainerDied","Data":"43d4de6a4f7a2548a41bc5cd4b388ec663d3625fd00c4d632184eb36685d0987"} Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.614709 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z72wj" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.647263 4812 scope.go:117] "RemoveContainer" containerID="a40a83599b05e5314d61d4b8124a8d61a47a20957d0944a3a18f631838db4e42" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.660670 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tvgf8"] Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.675467 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tvgf8"] Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.692801 4812 scope.go:117] "RemoveContainer" containerID="ef403abc05d52f2703347ad12fae894223e75cc38c893f95c840dc1e71bc26c7" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.693097 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z72wj"] Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.703986 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z72wj"] Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.730294 4812 scope.go:117] "RemoveContainer" containerID="206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.771742 4812 scope.go:117] "RemoveContainer" containerID="799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.788766 4812 scope.go:117] "RemoveContainer" containerID="69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.834966 4812 scope.go:117] "RemoveContainer" containerID="206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b" Nov 25 17:12:43 crc kubenswrapper[4812]: E1125 17:12:43.835388 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b\": container with ID starting with 
206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b not found: ID does not exist" containerID="206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.835425 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b"} err="failed to get container status \"206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b\": rpc error: code = NotFound desc = could not find container \"206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b\": container with ID starting with 206c50ec1bbbb7bd4bb422a6ebc1678cb0c4cc847ab71809faf8d250d72e152b not found: ID does not exist" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.835452 4812 scope.go:117] "RemoveContainer" containerID="799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62" Nov 25 17:12:43 crc kubenswrapper[4812]: E1125 17:12:43.835712 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62\": container with ID starting with 799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62 not found: ID does not exist" containerID="799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.835738 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62"} err="failed to get container status \"799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62\": rpc error: code = NotFound desc = could not find container \"799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62\": container with ID starting with 799c7f8b6ee6ffa8ce4cb56ee874b011aeffb8ceead6172ecb9ac644895f2f62 not found: ID does not exist" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.835754 4812 scope.go:117] "RemoveContainer" containerID="69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f" Nov 25 17:12:43 crc kubenswrapper[4812]: E1125 17:12:43.835948 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f\": container with ID starting with 69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f not found: ID does not exist" containerID="69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.835974 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f"} err="failed to get container status \"69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f\": rpc error: code = NotFound desc = could not find container \"69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f\": container with ID starting with 69e702c01ef043daa07872629738bcac12b7de7a1d0bcdbe8b6579e81b2f425f not found: ID does not exist" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.835992 4812 scope.go:117] "RemoveContainer" containerID="af0dddbd48a859897b6a29b2028092e4960cbf8f7171e1f1e01fd6723169c888" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.842829 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="466d78b1-4962-4530-8f14-23f1077f5e37" path="/var/lib/kubelet/pods/466d78b1-4962-4530-8f14-23f1077f5e37/volumes" Nov 25 17:12:43 crc kubenswrapper[4812]: I1125 17:12:43.843456 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" path="/var/lib/kubelet/pods/b1f3d194-6274-433e-a9b2-136a62808c4b/volumes" Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.357477 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-sbl8g_ba51f635-a14b-4763-b313-fcd9a3792c21/registry-server/0.log" Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.360962 4812 generic.go:334] "Generic (PLEG): container finished" podID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerID="1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483" exitCode=-1 Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.361004 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerDied","Data":"1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483"} Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.361033 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sbl8g" event={"ID":"ba51f635-a14b-4763-b313-fcd9a3792c21","Type":"ContainerDied","Data":"72e21980c1a1f73ac92b68ba6814c0febc86cfd97391c8702b0e27a81715ac53"} Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.361085 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sbl8g" Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.388143 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sbl8g"] Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.402494 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sbl8g"] Nov 25 17:12:47 crc kubenswrapper[4812]: I1125 17:12:47.844300 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" path="/var/lib/kubelet/pods/ba51f635-a14b-4763-b313-fcd9a3792c21/volumes" Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.372655 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerStarted","Data":"ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d"} Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.374030 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hs28w" Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.375604 4812 generic.go:334] "Generic (PLEG): container finished" podID="dec7f299-d822-494a-9a86-351502541b77" containerID="a75b9c52212987054edd4d53fecd8822dd3185c37899ee6dfd76c3a40c8cd155" exitCode=0 Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.375643 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" event={"ID":"dec7f299-d822-494a-9a86-351502541b77","Type":"ContainerDied","Data":"a75b9c52212987054edd4d53fecd8822dd3185c37899ee6dfd76c3a40c8cd155"} Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.398431 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bpqbb" podStartSLOduration=17.189698422 podStartE2EDuration="41.398412173s" podCreationTimestamp="2025-11-25 17:12:07 +0000 UTC" firstStartedPulling="2025-11-25 17:12:10.761998774 +0000 UTC m=+1505.602140869" lastFinishedPulling="2025-11-25 17:12:34.970712525 +0000 UTC m=+1529.810854620" observedRunningTime="2025-11-25 17:12:48.390073308 +0000 UTC m=+1543.230215413" watchObservedRunningTime="2025-11-25 17:12:48.398412173 +0000 UTC m=+1543.238554278" Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.424777 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hs28w"] Nov 25 17:12:48 crc kubenswrapper[4812]: I1125 17:12:48.432878 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hs28w"] Nov 25 17:12:49 crc kubenswrapper[4812]: I1125 17:12:49.788268 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" Nov 25 17:12:49 crc kubenswrapper[4812]: I1125 17:12:49.843229 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d47079d-27b3-446c-b58a-e30d00537660" path="/var/lib/kubelet/pods/9d47079d-27b3-446c-b58a-e30d00537660/volumes" Nov 25 17:12:49 crc kubenswrapper[4812]: I1125 17:12:49.979498 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-ssh-key\") pod \"dec7f299-d822-494a-9a86-351502541b77\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " Nov 25 17:12:49 crc kubenswrapper[4812]: I1125 17:12:49.979988 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx75l\" (UniqueName: \"kubernetes.io/projected/dec7f299-d822-494a-9a86-351502541b77-kube-api-access-cx75l\") pod \"dec7f299-d822-494a-9a86-351502541b77\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " Nov 25 17:12:49 crc kubenswrapper[4812]: I1125 17:12:49.980009 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-inventory\") pod \"dec7f299-d822-494a-9a86-351502541b77\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " Nov 25 17:12:49 crc kubenswrapper[4812]: I1125 17:12:49.980078 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-bootstrap-combined-ca-bundle\") pod \"dec7f299-d822-494a-9a86-351502541b77\" (UID: \"dec7f299-d822-494a-9a86-351502541b77\") " Nov 25 17:12:49 
crc kubenswrapper[4812]: I1125 17:12:49.992098 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dec7f299-d822-494a-9a86-351502541b77-kube-api-access-cx75l" (OuterVolumeSpecName: "kube-api-access-cx75l") pod "dec7f299-d822-494a-9a86-351502541b77" (UID: "dec7f299-d822-494a-9a86-351502541b77"). InnerVolumeSpecName "kube-api-access-cx75l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.005762 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "dec7f299-d822-494a-9a86-351502541b77" (UID: "dec7f299-d822-494a-9a86-351502541b77"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.046776 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dec7f299-d822-494a-9a86-351502541b77" (UID: "dec7f299-d822-494a-9a86-351502541b77"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.053615 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-inventory" (OuterVolumeSpecName: "inventory") pod "dec7f299-d822-494a-9a86-351502541b77" (UID: "dec7f299-d822-494a-9a86-351502541b77"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.081968 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx75l\" (UniqueName: \"kubernetes.io/projected/dec7f299-d822-494a-9a86-351502541b77-kube-api-access-cx75l\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.082008 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.082021 4812 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.082032 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dec7f299-d822-494a-9a86-351502541b77-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.400177 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" event={"ID":"dec7f299-d822-494a-9a86-351502541b77","Type":"ContainerDied","Data":"b29bae44bf5cafd7d2ab698802529aacabc2a94e48b44301eb397af0a69bfab5"} Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.400221 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b29bae44bf5cafd7d2ab698802529aacabc2a94e48b44301eb397af0a69bfab5" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.400229 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.505703 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l"] Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506290 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506315 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506351 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506359 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506371 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506378 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506390 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dec7f299-d822-494a-9a86-351502541b77" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506397 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="dec7f299-d822-494a-9a86-351502541b77" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506405 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506412 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506427 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506433 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506442 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506448 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506459 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506465 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="extract-content" Nov 25 17:12:50 crc 
kubenswrapper[4812]: E1125 17:12:50.506475 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506482 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506492 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506500 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506508 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506513 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506550 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506559 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506569 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506575 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506586 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506592 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506600 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506607 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506615 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506621 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506638 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506653 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" 
containerName="extract-utilities" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506666 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506674 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="extract-content" Nov 25 17:12:50 crc kubenswrapper[4812]: E1125 17:12:50.506687 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506693 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506864 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1f3d194-6274-433e-a9b2-136a62808c4b" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506877 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="dec7f299-d822-494a-9a86-351502541b77" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506893 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="17239e6b-3f83-49f2-8ac8-3e710ef1565b" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506905 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba51f635-a14b-4763-b313-fcd9a3792c21" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506914 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d47079d-27b3-446c-b58a-e30d00537660" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506926 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="466d78b1-4962-4530-8f14-23f1077f5e37" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.506940 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2aa4e7c-f2ff-4f64-b011-fc3dfcf361ef" containerName="registry-server" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.507627 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.509651 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.512472 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.512556 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.512925 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.517887 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l"] Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.691282 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.691336 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.691362 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swjbd\" (UniqueName: \"kubernetes.io/projected/9771507e-2a96-4539-ac2f-00a418f64803-kube-api-access-swjbd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.792732 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.792784 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.792810 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swjbd\" (UniqueName: \"kubernetes.io/projected/9771507e-2a96-4539-ac2f-00a418f64803-kube-api-access-swjbd\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.798216 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.803189 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.809183 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swjbd\" (UniqueName: \"kubernetes.io/projected/9771507e-2a96-4539-ac2f-00a418f64803-kube-api-access-swjbd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:50 crc kubenswrapper[4812]: I1125 17:12:50.824224 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:12:51 crc kubenswrapper[4812]: I1125 17:12:51.338312 4812 scope.go:117] "RemoveContainer" containerID="a5a9e7bdff8e1f5d378038365d19484a1acd020cf1387be7213038aff1760d9f" Nov 25 17:12:51 crc kubenswrapper[4812]: I1125 17:12:51.391040 4812 scope.go:117] "RemoveContainer" containerID="c40af37826d4abdf914c10e6548f94c69232fb8a87c8b7c6f3157e5103ce1c58" Nov 25 17:12:51 crc kubenswrapper[4812]: I1125 17:12:51.441362 4812 scope.go:117] "RemoveContainer" containerID="f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508" Nov 25 17:12:51 crc kubenswrapper[4812]: I1125 17:12:51.442087 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l"] Nov 25 17:12:52 crc kubenswrapper[4812]: I1125 17:12:52.444796 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" event={"ID":"9771507e-2a96-4539-ac2f-00a418f64803","Type":"ContainerStarted","Data":"c915e0f5460edce953cce2157147360fd24054c7dfe2f403e2c1981b99f099a8"} Nov 25 17:12:52 crc kubenswrapper[4812]: I1125 17:12:52.445254 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" event={"ID":"9771507e-2a96-4539-ac2f-00a418f64803","Type":"ContainerStarted","Data":"299488dbd9ac0a392fcb4e136f838ee982bf6413ed82094ec15309bff5e6b00d"} Nov 25 17:12:52 crc kubenswrapper[4812]: I1125 17:12:52.466174 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" podStartSLOduration=1.942950994 podStartE2EDuration="2.46614983s" podCreationTimestamp="2025-11-25 17:12:50 +0000 UTC" firstStartedPulling="2025-11-25 17:12:51.455600233 +0000 UTC 
m=+1546.295742328" lastFinishedPulling="2025-11-25 17:12:51.978799069 +0000 UTC m=+1546.818941164" observedRunningTime="2025-11-25 17:12:52.462774939 +0000 UTC m=+1547.302917044" watchObservedRunningTime="2025-11-25 17:12:52.46614983 +0000 UTC m=+1547.306291925" Nov 25 17:12:52 crc kubenswrapper[4812]: I1125 17:12:52.516681 4812 scope.go:117] "RemoveContainer" containerID="96b74e481278bc080f065227435d8424f93c75de3c3152f7c4c2a1dcb1fe9c1a" Nov 25 17:12:55 crc kubenswrapper[4812]: I1125 17:12:55.197738 4812 scope.go:117] "RemoveContainer" containerID="6d0325b0e51e576f2e4fa22be98f5c7f4a678983a5954059d3980ad0e5f95d96" Nov 25 17:12:57 crc kubenswrapper[4812]: I1125 17:12:57.332653 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:12:57 crc kubenswrapper[4812]: I1125 17:12:57.333023 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.254359 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bpqbb" Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.254417 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bpqbb" Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.303869 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bpqbb" Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.542775 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bpqbb" Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.588685 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bpqbb"] Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.932654 4812 scope.go:117] "RemoveContainer" containerID="b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934" Nov 25 17:12:58 crc kubenswrapper[4812]: I1125 17:12:58.968621 4812 scope.go:117] "RemoveContainer" containerID="14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.012158 4812 scope.go:117] "RemoveContainer" containerID="f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.012805 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508\": container with ID starting with f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508 not found: ID does not exist" containerID="f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.012836 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508"} err="failed to get container 
status \"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508\": rpc error: code = NotFound desc = could not find container \"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508\": container with ID starting with f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.012858 4812 scope.go:117] "RemoveContainer" containerID="b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.013106 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934\": container with ID starting with b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934 not found: ID does not exist" containerID="b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.013125 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934"} err="failed to get container status \"b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934\": rpc error: code = NotFound desc = could not find container \"b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934\": container with ID starting with b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.013137 4812 scope.go:117] "RemoveContainer" containerID="14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.013350 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414\": container with ID starting with 14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414 not found: ID does not exist" containerID="14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.013367 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414"} err="failed to get container status \"14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414\": rpc error: code = NotFound desc = could not find container \"14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414\": container with ID starting with 14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.013379 4812 scope.go:117] "RemoveContainer" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.014116 4812 kuberuntime_gc.go:361] "Error getting ContainerStatus for containerID" containerID="b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934" err="rpc error: code = NotFound desc = could not find container \"b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934\": container with ID starting with b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.014141 4812 
kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/certified-operators-tvgf8_openshift-marketplace_extract-content-b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934.log: no such file or directory" path="/var/log/containers/certified-operators-tvgf8_openshift-marketplace_extract-content-b4b66900c5aa1e17a0d53ff61d461d014b91986386823f163ffc78e1f72cb934.log" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.014552 4812 kuberuntime_gc.go:361] "Error getting ContainerStatus for containerID" containerID="14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414" err="rpc error: code = NotFound desc = could not find container \"14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414\": container with ID starting with 14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.014574 4812 kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/certified-operators-tvgf8_openshift-marketplace_extract-utilities-14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414.log: no such file or directory" path="/var/log/containers/certified-operators-tvgf8_openshift-marketplace_extract-utilities-14867abf92daa3e1d8350d568dfc3bf2a46aaa4109aab8c253fa517771fb8414.log" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.014915 4812 kuberuntime_gc.go:361] "Error getting ContainerStatus for containerID" containerID="f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508" err="rpc error: code = NotFound desc = could not find container \"f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508\": container with ID starting with f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.014940 4812 kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/certified-operators-tvgf8_openshift-marketplace_registry-server-f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508.log: no such file or directory" path="/var/log/containers/certified-operators-tvgf8_openshift-marketplace_registry-server-f5c8903c7f8a802821f94b4a9e0051bc17c1bb4a5b0d78088c51eab4b6907508.log" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.016353 4812 kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/community-operators-z72wj_openshift-marketplace_registry-server-e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b.log: no such file or directory" path="/var/log/containers/community-operators-z72wj_openshift-marketplace_registry-server-e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b.log" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.043563 4812 scope.go:117] "RemoveContainer" containerID="31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.072657 4812 scope.go:117] "RemoveContainer" containerID="42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.112177 4812 scope.go:117] "RemoveContainer" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.113974 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b\": container with ID starting with e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b not found: ID does not exist" containerID="e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.114011 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b"} err="failed to get container status \"e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b\": rpc error: code = NotFound desc = could not find container \"e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b\": container with ID starting with e06e41eef9e6ce69bb0869c83e5a9b2b62853471c61f81634019b5d8218e9d5b not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.114042 4812 scope.go:117] "RemoveContainer" containerID="31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.114582 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7\": container with ID starting with 31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7 not found: ID does not exist" containerID="31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.114644 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7"} err="failed to get container status \"31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7\": rpc error: code = NotFound desc = could not find container \"31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7\": container with ID starting with 31dc3c6f7bfa2b3db2813b3c4768386035d78eb4e7e30f9026638722e0ca45a7 not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.114664 4812 scope.go:117] "RemoveContainer" containerID="42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d" Nov 25 17:12:59 crc kubenswrapper[4812]: E1125 17:12:59.115017 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d\": container with ID starting with 42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d not found: ID does not exist" containerID="42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.115062 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d"} err="failed to get container status \"42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d\": rpc error: code = NotFound desc = could not find container \"42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d\": container with ID starting with 42da5c4b7ea5fca93fb824391cc751789096f34f2193408d935aa929561c214d not found: ID does not exist" Nov 25 17:12:59 crc kubenswrapper[4812]: I1125 17:12:59.115081 4812 scope.go:117] "RemoveContainer" containerID="1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483" Nov 25 
17:13:00 crc kubenswrapper[4812]: I1125 17:13:00.513961 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bpqbb" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="registry-server" containerID="cri-o://ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d" gracePeriod=2 Nov 25 17:13:00 crc kubenswrapper[4812]: I1125 17:13:00.928697 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpqbb" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.075795 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-utilities\") pod \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.075907 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-catalog-content\") pod \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.076073 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ccpr\" (UniqueName: \"kubernetes.io/projected/7e1417f9-84df-4f66-878f-5b70e2c90cf1-kube-api-access-7ccpr\") pod \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\" (UID: \"7e1417f9-84df-4f66-878f-5b70e2c90cf1\") " Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.076605 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-utilities" (OuterVolumeSpecName: "utilities") pod "7e1417f9-84df-4f66-878f-5b70e2c90cf1" (UID: "7e1417f9-84df-4f66-878f-5b70e2c90cf1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.081161 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e1417f9-84df-4f66-878f-5b70e2c90cf1-kube-api-access-7ccpr" (OuterVolumeSpecName: "kube-api-access-7ccpr") pod "7e1417f9-84df-4f66-878f-5b70e2c90cf1" (UID: "7e1417f9-84df-4f66-878f-5b70e2c90cf1"). InnerVolumeSpecName "kube-api-access-7ccpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.173901 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e1417f9-84df-4f66-878f-5b70e2c90cf1" (UID: "7e1417f9-84df-4f66-878f-5b70e2c90cf1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.178163 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ccpr\" (UniqueName: \"kubernetes.io/projected/7e1417f9-84df-4f66-878f-5b70e2c90cf1-kube-api-access-7ccpr\") on node \"crc\" DevicePath \"\"" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.178301 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.178376 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e1417f9-84df-4f66-878f-5b70e2c90cf1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.524760 4812 generic.go:334] "Generic (PLEG): container finished" podID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerID="ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d" exitCode=0 Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.524826 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpqbb" Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.524858 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerDied","Data":"ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d"} Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.525196 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpqbb" event={"ID":"7e1417f9-84df-4f66-878f-5b70e2c90cf1","Type":"ContainerDied","Data":"3cd5bb1f1d3f35002025caa858d2e3ba0e5931936071a0111a781617a8742e56"} Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.573311 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bpqbb"] Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.582384 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bpqbb"] Nov 25 17:13:01 crc kubenswrapper[4812]: I1125 17:13:01.843515 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" path="/var/lib/kubelet/pods/7e1417f9-84df-4f66-878f-5b70e2c90cf1/volumes" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.602264 4812 scope.go:117] "RemoveContainer" containerID="b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.621388 4812 scope.go:117] "RemoveContainer" containerID="12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.665028 4812 scope.go:117] "RemoveContainer" containerID="1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483" Nov 25 17:13:06 crc kubenswrapper[4812]: E1125 17:13:06.665595 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483\": container with ID starting with 1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483 not found: ID does not exist" containerID="1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483" Nov 25 
17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.665637 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483"} err="failed to get container status \"1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483\": rpc error: code = NotFound desc = could not find container \"1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483\": container with ID starting with 1a94b68e39d850c54b64983aea0134dfcee64b7f1907d0c3128ccd0b2e232483 not found: ID does not exist" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.665664 4812 scope.go:117] "RemoveContainer" containerID="b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5" Nov 25 17:13:06 crc kubenswrapper[4812]: E1125 17:13:06.666132 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5\": container with ID starting with b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5 not found: ID does not exist" containerID="b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.666158 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5"} err="failed to get container status \"b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5\": rpc error: code = NotFound desc = could not find container \"b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5\": container with ID starting with b7da59f4a93b17654043d71643287117827fe8ba1388d7dcbd968aa322b8dfb5 not found: ID does not exist" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.666173 4812 scope.go:117] "RemoveContainer" containerID="12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4" Nov 25 17:13:06 crc kubenswrapper[4812]: E1125 17:13:06.666447 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4\": container with ID starting with 12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4 not found: ID does not exist" containerID="12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.666495 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4"} err="failed to get container status \"12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4\": rpc error: code = NotFound desc = could not find container \"12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4\": container with ID starting with 12505d66bd8520b7de472a181116d9bba790bd7298645f6143d9aa1ba82683d4 not found: ID does not exist" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.666549 4812 scope.go:117] "RemoveContainer" containerID="ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.700383 4812 scope.go:117] "RemoveContainer" containerID="6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.728134 4812 scope.go:117] "RemoveContainer" 
containerID="aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.760012 4812 scope.go:117] "RemoveContainer" containerID="ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d" Nov 25 17:13:06 crc kubenswrapper[4812]: E1125 17:13:06.760509 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d\": container with ID starting with ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d not found: ID does not exist" containerID="ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.760665 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d"} err="failed to get container status \"ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d\": rpc error: code = NotFound desc = could not find container \"ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d\": container with ID starting with ec9e3a33e087f6cd27a03839d81a7fa33dda001fd592bd4c38e7788c3f81207d not found: ID does not exist" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.760779 4812 scope.go:117] "RemoveContainer" containerID="6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e" Nov 25 17:13:06 crc kubenswrapper[4812]: E1125 17:13:06.761232 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e\": container with ID starting with 6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e not found: ID does not exist" containerID="6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.761281 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e"} err="failed to get container status \"6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e\": rpc error: code = NotFound desc = could not find container \"6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e\": container with ID starting with 6cf7fd02552106f44f9ce015dc065c43c4a5ba0ceecf4581debd7c64b0cccd0e not found: ID does not exist" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.761307 4812 scope.go:117] "RemoveContainer" containerID="aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d" Nov 25 17:13:06 crc kubenswrapper[4812]: E1125 17:13:06.761616 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d\": container with ID starting with aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d not found: ID does not exist" containerID="aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d" Nov 25 17:13:06 crc kubenswrapper[4812]: I1125 17:13:06.761660 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d"} err="failed to get container status \"aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d\": rpc error: code = 
NotFound desc = could not find container \"aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d\": container with ID starting with aa37f05bc93476d4bda15ca0b9a9540bc9894f3432fd2e8c74932ea0a12e6f4d not found: ID does not exist" Nov 25 17:13:19 crc kubenswrapper[4812]: I1125 17:13:19.055319 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-d799-account-create-f4x79"] Nov 25 17:13:19 crc kubenswrapper[4812]: I1125 17:13:19.066061 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-d799-account-create-f4x79"] Nov 25 17:13:19 crc kubenswrapper[4812]: I1125 17:13:19.076569 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-g9jm8"] Nov 25 17:13:19 crc kubenswrapper[4812]: I1125 17:13:19.084132 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-g9jm8"] Nov 25 17:13:19 crc kubenswrapper[4812]: I1125 17:13:19.857154 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8286a549-6491-4255-9332-282eb9297c35" path="/var/lib/kubelet/pods/8286a549-6491-4255-9332-282eb9297c35/volumes" Nov 25 17:13:19 crc kubenswrapper[4812]: I1125 17:13:19.858110 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="908bb661-4226-4cb0-8527-b9a93f6048e1" path="/var/lib/kubelet/pods/908bb661-4226-4cb0-8527-b9a93f6048e1/volumes" Nov 25 17:13:20 crc kubenswrapper[4812]: I1125 17:13:20.027688 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-z45dw"] Nov 25 17:13:20 crc kubenswrapper[4812]: I1125 17:13:20.036451 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7b45-account-create-shvlr"] Nov 25 17:13:20 crc kubenswrapper[4812]: I1125 17:13:20.043847 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-z45dw"] Nov 25 17:13:20 crc kubenswrapper[4812]: I1125 17:13:20.051076 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-7b45-account-create-shvlr"] Nov 25 17:13:21 crc kubenswrapper[4812]: I1125 17:13:21.841827 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0489df11-52f0-4093-9210-e621ba69425c" path="/var/lib/kubelet/pods/0489df11-52f0-4093-9210-e621ba69425c/volumes" Nov 25 17:13:21 crc kubenswrapper[4812]: I1125 17:13:21.843342 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d390e826-1601-43d3-bbf2-1865db1b963f" path="/var/lib/kubelet/pods/d390e826-1601-43d3-bbf2-1865db1b963f/volumes" Nov 25 17:13:25 crc kubenswrapper[4812]: I1125 17:13:25.033427 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-da6c-account-create-nz8bg"] Nov 25 17:13:25 crc kubenswrapper[4812]: I1125 17:13:25.050451 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-9wkjm"] Nov 25 17:13:25 crc kubenswrapper[4812]: I1125 17:13:25.059725 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-da6c-account-create-nz8bg"] Nov 25 17:13:25 crc kubenswrapper[4812]: I1125 17:13:25.068345 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-9wkjm"] Nov 25 17:13:25 crc kubenswrapper[4812]: I1125 17:13:25.847702 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45a126f9-1bc4-4142-aed5-170f467d104a" path="/var/lib/kubelet/pods/45a126f9-1bc4-4142-aed5-170f467d104a/volumes" Nov 25 17:13:25 crc kubenswrapper[4812]: I1125 17:13:25.849778 4812 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7335e288-40ba-43f9-b713-ae2d0b6fea48" path="/var/lib/kubelet/pods/7335e288-40ba-43f9-b713-ae2d0b6fea48/volumes" Nov 25 17:13:27 crc kubenswrapper[4812]: I1125 17:13:27.332937 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:13:27 crc kubenswrapper[4812]: I1125 17:13:27.333323 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:13:50 crc kubenswrapper[4812]: I1125 17:13:50.041219 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-n9rp2"] Nov 25 17:13:50 crc kubenswrapper[4812]: I1125 17:13:50.051107 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-n9rp2"] Nov 25 17:13:51 crc kubenswrapper[4812]: I1125 17:13:51.841429 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c9a938f-a4f9-4739-837a-928721d40a65" path="/var/lib/kubelet/pods/9c9a938f-a4f9-4739-837a-928721d40a65/volumes" Nov 25 17:13:57 crc kubenswrapper[4812]: I1125 17:13:57.332326 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:13:57 crc kubenswrapper[4812]: I1125 17:13:57.332605 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:13:57 crc kubenswrapper[4812]: I1125 17:13:57.332649 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:13:57 crc kubenswrapper[4812]: I1125 17:13:57.333283 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:13:57 crc kubenswrapper[4812]: I1125 17:13:57.333332 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" gracePeriod=600 Nov 25 17:13:57 crc kubenswrapper[4812]: E1125 17:13:57.451351 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:13:58 crc kubenswrapper[4812]: I1125 17:13:58.030579 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" exitCode=0 Nov 25 17:13:58 crc kubenswrapper[4812]: I1125 17:13:58.030630 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5"} Nov 25 17:13:58 crc kubenswrapper[4812]: I1125 17:13:58.030674 4812 scope.go:117] "RemoveContainer" containerID="928bfffc467fb2700fc3f642988d9c74f29633743edcd7e42d0737b45e725dce" Nov 25 17:13:58 crc kubenswrapper[4812]: I1125 17:13:58.031698 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:13:58 crc kubenswrapper[4812]: E1125 17:13:58.034796 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.023665 4812 scope.go:117] "RemoveContainer" containerID="76db7f2d4c1ce2bca264f19aa7ad15682e585bb1963fd8276e0622f501956be0" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.040276 4812 generic.go:334] "Generic (PLEG): container finished" podID="9771507e-2a96-4539-ac2f-00a418f64803" containerID="c915e0f5460edce953cce2157147360fd24054c7dfe2f403e2c1981b99f099a8" exitCode=0 Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.040345 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" event={"ID":"9771507e-2a96-4539-ac2f-00a418f64803","Type":"ContainerDied","Data":"c915e0f5460edce953cce2157147360fd24054c7dfe2f403e2c1981b99f099a8"} Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.056233 4812 scope.go:117] "RemoveContainer" containerID="e21af590e7ad85ab22d50cbb7c8099cb44f8db9dd2224107c0c27007c406b501" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.082809 4812 scope.go:117] "RemoveContainer" containerID="845dd128a87dccd086b78f9996bef637cada05bed2f95ace55818d08e138f102" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.134841 4812 scope.go:117] "RemoveContainer" containerID="5b005913fa94458146c1e7a30fa43a961c9fa4907793a88d4e5c5e79f095304d" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.161768 4812 scope.go:117] "RemoveContainer" containerID="0f89af6705b07691384a44ad4082cc1f45f2881207fa4382a17d32d394e75ed1" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.197077 4812 scope.go:117] "RemoveContainer" containerID="c6af31ffe01216181fa5ea3dfda17123caeb566808fb13b76602fbf47d9210ca" Nov 25 17:13:59 crc kubenswrapper[4812]: I1125 17:13:59.234206 4812 scope.go:117] "RemoveContainer" containerID="fc3b21a12b6fac380cbb329b165e6ab778cb58791d7974511dbe80091c62f451" Nov 25 17:14:00 crc 
kubenswrapper[4812]: I1125 17:14:00.494255 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.640609 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swjbd\" (UniqueName: \"kubernetes.io/projected/9771507e-2a96-4539-ac2f-00a418f64803-kube-api-access-swjbd\") pod \"9771507e-2a96-4539-ac2f-00a418f64803\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.640706 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-ssh-key\") pod \"9771507e-2a96-4539-ac2f-00a418f64803\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.640768 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-inventory\") pod \"9771507e-2a96-4539-ac2f-00a418f64803\" (UID: \"9771507e-2a96-4539-ac2f-00a418f64803\") " Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.646261 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9771507e-2a96-4539-ac2f-00a418f64803-kube-api-access-swjbd" (OuterVolumeSpecName: "kube-api-access-swjbd") pod "9771507e-2a96-4539-ac2f-00a418f64803" (UID: "9771507e-2a96-4539-ac2f-00a418f64803"). InnerVolumeSpecName "kube-api-access-swjbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.665389 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-inventory" (OuterVolumeSpecName: "inventory") pod "9771507e-2a96-4539-ac2f-00a418f64803" (UID: "9771507e-2a96-4539-ac2f-00a418f64803"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.672649 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9771507e-2a96-4539-ac2f-00a418f64803" (UID: "9771507e-2a96-4539-ac2f-00a418f64803"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.743170 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swjbd\" (UniqueName: \"kubernetes.io/projected/9771507e-2a96-4539-ac2f-00a418f64803-kube-api-access-swjbd\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.743213 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:00 crc kubenswrapper[4812]: I1125 17:14:00.743227 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9771507e-2a96-4539-ac2f-00a418f64803-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.061939 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" event={"ID":"9771507e-2a96-4539-ac2f-00a418f64803","Type":"ContainerDied","Data":"299488dbd9ac0a392fcb4e136f838ee982bf6413ed82094ec15309bff5e6b00d"} Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.061981 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="299488dbd9ac0a392fcb4e136f838ee982bf6413ed82094ec15309bff5e6b00d" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.062447 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136221 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m"] Nov 25 17:14:01 crc kubenswrapper[4812]: E1125 17:14:01.136674 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="extract-content" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136691 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="extract-content" Nov 25 17:14:01 crc kubenswrapper[4812]: E1125 17:14:01.136705 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="registry-server" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136712 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="registry-server" Nov 25 17:14:01 crc kubenswrapper[4812]: E1125 17:14:01.136733 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9771507e-2a96-4539-ac2f-00a418f64803" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136740 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9771507e-2a96-4539-ac2f-00a418f64803" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:01 crc kubenswrapper[4812]: E1125 17:14:01.136762 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="extract-utilities" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136769 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="extract-utilities" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136944 4812 
memory_manager.go:354] "RemoveStaleState removing state" podUID="9771507e-2a96-4539-ac2f-00a418f64803" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.136975 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e1417f9-84df-4f66-878f-5b70e2c90cf1" containerName="registry-server" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.137564 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.139605 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.139740 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.139870 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.162125 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.169419 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m"] Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.254410 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7xl9\" (UniqueName: \"kubernetes.io/projected/3dfa6352-9a39-4a26-9289-e861e371b922-kube-api-access-k7xl9\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.254633 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.254796 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.357258 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.357420 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.357640 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7xl9\" (UniqueName: \"kubernetes.io/projected/3dfa6352-9a39-4a26-9289-e861e371b922-kube-api-access-k7xl9\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.362135 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.364231 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.375078 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7xl9\" (UniqueName: \"kubernetes.io/projected/3dfa6352-9a39-4a26-9289-e861e371b922-kube-api-access-k7xl9\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.481061 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:01 crc kubenswrapper[4812]: I1125 17:14:01.995234 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m"] Nov 25 17:14:02 crc kubenswrapper[4812]: I1125 17:14:02.000548 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:14:02 crc kubenswrapper[4812]: I1125 17:14:02.043163 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-61d1-account-create-znx74"] Nov 25 17:14:02 crc kubenswrapper[4812]: I1125 17:14:02.055319 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-61d1-account-create-znx74"] Nov 25 17:14:02 crc kubenswrapper[4812]: I1125 17:14:02.069870 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" event={"ID":"3dfa6352-9a39-4a26-9289-e861e371b922","Type":"ContainerStarted","Data":"be88d133adc3c737d9dedd8581c5cb670e47cca35b8ea19c6186bcd68119e3e4"} Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.039270 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-xkff4"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.048669 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-f4f4-account-create-zlgh5"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.058110 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-ndx2b"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.065441 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-f4f4-account-create-zlgh5"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.073029 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-qhkxm"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.080166 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" event={"ID":"3dfa6352-9a39-4a26-9289-e861e371b922","Type":"ContainerStarted","Data":"ac39971a23a1d7dfe5e74f9a4d8477464e4067365d842a9ed93e1f1d75710261"} Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.081799 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-ndx2b"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.091747 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-xkff4"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.104592 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-qhkxm"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.111068 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-5a3f-account-create-mzlm9"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.119247 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-5a3f-account-create-mzlm9"] Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.123407 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" podStartSLOduration=1.645452357 podStartE2EDuration="2.12338569s" podCreationTimestamp="2025-11-25 17:14:01 +0000 UTC" firstStartedPulling="2025-11-25 17:14:02.000332809 +0000 UTC m=+1616.840474914" 
lastFinishedPulling="2025-11-25 17:14:02.478266152 +0000 UTC m=+1617.318408247" observedRunningTime="2025-11-25 17:14:03.09973391 +0000 UTC m=+1617.939876005" watchObservedRunningTime="2025-11-25 17:14:03.12338569 +0000 UTC m=+1617.963527785" Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.844220 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28562fbc-1113-4840-a9cf-597672e44f69" path="/var/lib/kubelet/pods/28562fbc-1113-4840-a9cf-597672e44f69/volumes" Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.844837 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="703cd6e3-90a5-4c10-9f88-d4faac8e24e0" path="/var/lib/kubelet/pods/703cd6e3-90a5-4c10-9f88-d4faac8e24e0/volumes" Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.845497 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a560774-b983-4fb0-b630-f9913688c130" path="/var/lib/kubelet/pods/9a560774-b983-4fb0-b630-f9913688c130/volumes" Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.846151 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c10632e3-be65-42c8-b358-b3ce41252b94" path="/var/lib/kubelet/pods/c10632e3-be65-42c8-b358-b3ce41252b94/volumes" Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.847328 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f65975c6-56d0-456e-8c7c-ac900b682f94" path="/var/lib/kubelet/pods/f65975c6-56d0-456e-8c7c-ac900b682f94/volumes" Nov 25 17:14:03 crc kubenswrapper[4812]: I1125 17:14:03.847875 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6baf467-334f-4b16-8460-f590e01d6f65" path="/var/lib/kubelet/pods/f6baf467-334f-4b16-8460-f590e01d6f65/volumes" Nov 25 17:14:08 crc kubenswrapper[4812]: I1125 17:14:08.130300 4812 generic.go:334] "Generic (PLEG): container finished" podID="3dfa6352-9a39-4a26-9289-e861e371b922" containerID="ac39971a23a1d7dfe5e74f9a4d8477464e4067365d842a9ed93e1f1d75710261" exitCode=0 Nov 25 17:14:08 crc kubenswrapper[4812]: I1125 17:14:08.130423 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" event={"ID":"3dfa6352-9a39-4a26-9289-e861e371b922","Type":"ContainerDied","Data":"ac39971a23a1d7dfe5e74f9a4d8477464e4067365d842a9ed93e1f1d75710261"} Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.517163 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.596652 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-inventory\") pod \"3dfa6352-9a39-4a26-9289-e861e371b922\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.596912 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-ssh-key\") pod \"3dfa6352-9a39-4a26-9289-e861e371b922\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.597132 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7xl9\" (UniqueName: \"kubernetes.io/projected/3dfa6352-9a39-4a26-9289-e861e371b922-kube-api-access-k7xl9\") pod \"3dfa6352-9a39-4a26-9289-e861e371b922\" (UID: \"3dfa6352-9a39-4a26-9289-e861e371b922\") " Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.603963 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dfa6352-9a39-4a26-9289-e861e371b922-kube-api-access-k7xl9" (OuterVolumeSpecName: "kube-api-access-k7xl9") pod "3dfa6352-9a39-4a26-9289-e861e371b922" (UID: "3dfa6352-9a39-4a26-9289-e861e371b922"). InnerVolumeSpecName "kube-api-access-k7xl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.626905 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-inventory" (OuterVolumeSpecName: "inventory") pod "3dfa6352-9a39-4a26-9289-e861e371b922" (UID: "3dfa6352-9a39-4a26-9289-e861e371b922"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.627354 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3dfa6352-9a39-4a26-9289-e861e371b922" (UID: "3dfa6352-9a39-4a26-9289-e861e371b922"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.698758 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.698785 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dfa6352-9a39-4a26-9289-e861e371b922-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:09 crc kubenswrapper[4812]: I1125 17:14:09.698795 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7xl9\" (UniqueName: \"kubernetes.io/projected/3dfa6352-9a39-4a26-9289-e861e371b922-kube-api-access-k7xl9\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.155825 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" event={"ID":"3dfa6352-9a39-4a26-9289-e861e371b922","Type":"ContainerDied","Data":"be88d133adc3c737d9dedd8581c5cb670e47cca35b8ea19c6186bcd68119e3e4"} Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.155898 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be88d133adc3c737d9dedd8581c5cb670e47cca35b8ea19c6186bcd68119e3e4" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.156002 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.239933 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k"] Nov 25 17:14:10 crc kubenswrapper[4812]: E1125 17:14:10.240706 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dfa6352-9a39-4a26-9289-e861e371b922" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.240731 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dfa6352-9a39-4a26-9289-e861e371b922" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.240990 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dfa6352-9a39-4a26-9289-e861e371b922" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.241753 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.244368 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.245726 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.245863 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.246725 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.263611 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k"] Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.410802 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.410875 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.411000 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f8kl\" (UniqueName: \"kubernetes.io/projected/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-kube-api-access-5f8kl\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.512993 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f8kl\" (UniqueName: \"kubernetes.io/projected/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-kube-api-access-5f8kl\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.513322 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.513398 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: 
\"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.521423 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.521749 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.542339 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f8kl\" (UniqueName: \"kubernetes.io/projected/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-kube-api-access-5f8kl\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-qxh2k\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.573039 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:10 crc kubenswrapper[4812]: I1125 17:14:10.831662 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:14:10 crc kubenswrapper[4812]: E1125 17:14:10.832026 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:14:11 crc kubenswrapper[4812]: I1125 17:14:11.032987 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-qpt4v"] Nov 25 17:14:11 crc kubenswrapper[4812]: I1125 17:14:11.041763 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-qpt4v"] Nov 25 17:14:11 crc kubenswrapper[4812]: I1125 17:14:11.120431 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k"] Nov 25 17:14:11 crc kubenswrapper[4812]: W1125 17:14:11.125748 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0fd4a0b_7e90_438d_89cf_f3dbc3da2b71.slice/crio-8a5f0261d41642f66f99515d64950bcf0557aba991b91ecabbedc679e73e34f2 WatchSource:0}: Error finding container 8a5f0261d41642f66f99515d64950bcf0557aba991b91ecabbedc679e73e34f2: Status 404 returned error can't find the container with id 8a5f0261d41642f66f99515d64950bcf0557aba991b91ecabbedc679e73e34f2 Nov 25 17:14:11 crc kubenswrapper[4812]: I1125 17:14:11.164524 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" 
event={"ID":"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71","Type":"ContainerStarted","Data":"8a5f0261d41642f66f99515d64950bcf0557aba991b91ecabbedc679e73e34f2"} Nov 25 17:14:11 crc kubenswrapper[4812]: I1125 17:14:11.842843 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc9bcdae-4537-427d-a3f1-064ae62d7b62" path="/var/lib/kubelet/pods/cc9bcdae-4537-427d-a3f1-064ae62d7b62/volumes" Nov 25 17:14:12 crc kubenswrapper[4812]: I1125 17:14:12.174873 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" event={"ID":"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71","Type":"ContainerStarted","Data":"bc775ca91f89af73440faa7a36a4895a30c998ec768624452c98d855eb3736ca"} Nov 25 17:14:12 crc kubenswrapper[4812]: I1125 17:14:12.197910 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" podStartSLOduration=1.746639745 podStartE2EDuration="2.197894596s" podCreationTimestamp="2025-11-25 17:14:10 +0000 UTC" firstStartedPulling="2025-11-25 17:14:11.136664958 +0000 UTC m=+1625.976807053" lastFinishedPulling="2025-11-25 17:14:11.587919809 +0000 UTC m=+1626.428061904" observedRunningTime="2025-11-25 17:14:12.188629745 +0000 UTC m=+1627.028771860" watchObservedRunningTime="2025-11-25 17:14:12.197894596 +0000 UTC m=+1627.038036691" Nov 25 17:14:25 crc kubenswrapper[4812]: I1125 17:14:25.836950 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:14:25 crc kubenswrapper[4812]: E1125 17:14:25.838720 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:14:38 crc kubenswrapper[4812]: I1125 17:14:38.832860 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:14:38 crc kubenswrapper[4812]: E1125 17:14:38.833734 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:14:48 crc kubenswrapper[4812]: I1125 17:14:48.467933 4812 generic.go:334] "Generic (PLEG): container finished" podID="d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" containerID="bc775ca91f89af73440faa7a36a4895a30c998ec768624452c98d855eb3736ca" exitCode=0 Nov 25 17:14:48 crc kubenswrapper[4812]: I1125 17:14:48.468005 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" event={"ID":"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71","Type":"ContainerDied","Data":"bc775ca91f89af73440faa7a36a4895a30c998ec768624452c98d855eb3736ca"} Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:49.833231 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:14:50 crc kubenswrapper[4812]: E1125 
17:14:49.835001 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:49.872685 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.058310 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-ssh-key\") pod \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.058362 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-inventory\") pod \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.058580 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f8kl\" (UniqueName: \"kubernetes.io/projected/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-kube-api-access-5f8kl\") pod \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\" (UID: \"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71\") " Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.071590 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-kube-api-access-5f8kl" (OuterVolumeSpecName: "kube-api-access-5f8kl") pod "d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" (UID: "d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71"). InnerVolumeSpecName "kube-api-access-5f8kl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.089634 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" (UID: "d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.089679 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-inventory" (OuterVolumeSpecName: "inventory") pod "d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" (UID: "d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.160093 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f8kl\" (UniqueName: \"kubernetes.io/projected/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-kube-api-access-5f8kl\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.160331 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.160406 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.487245 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" event={"ID":"d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71","Type":"ContainerDied","Data":"8a5f0261d41642f66f99515d64950bcf0557aba991b91ecabbedc679e73e34f2"} Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.487288 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a5f0261d41642f66f99515d64950bcf0557aba991b91ecabbedc679e73e34f2" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.487318 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.558986 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8"] Nov 25 17:14:50 crc kubenswrapper[4812]: E1125 17:14:50.569365 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.569396 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.569764 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.570329 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8"] Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.570412 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.573233 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.573245 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.573345 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.573674 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.668811 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j95l7\" (UniqueName: \"kubernetes.io/projected/1abf20b0-08ce-41f6-bef8-f05278504d3e-kube-api-access-j95l7\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.668958 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.669007 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.770601 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.770673 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.770774 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j95l7\" (UniqueName: \"kubernetes.io/projected/1abf20b0-08ce-41f6-bef8-f05278504d3e-kube-api-access-j95l7\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.774609 4812 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.774963 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.788158 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j95l7\" (UniqueName: \"kubernetes.io/projected/1abf20b0-08ce-41f6-bef8-f05278504d3e-kube-api-access-j95l7\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:50 crc kubenswrapper[4812]: I1125 17:14:50.891214 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:51 crc kubenswrapper[4812]: I1125 17:14:51.399484 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8"] Nov 25 17:14:51 crc kubenswrapper[4812]: I1125 17:14:51.497461 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" event={"ID":"1abf20b0-08ce-41f6-bef8-f05278504d3e","Type":"ContainerStarted","Data":"c33b955576578954ab1e6e770bc3266913d6f6ecf55f7d9d0b885244fff46369"} Nov 25 17:14:52 crc kubenswrapper[4812]: I1125 17:14:52.506979 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" event={"ID":"1abf20b0-08ce-41f6-bef8-f05278504d3e","Type":"ContainerStarted","Data":"3d9833a6b5030da3335af7b4a4871113fe6616049684cda574a47844932dd9ed"} Nov 25 17:14:52 crc kubenswrapper[4812]: I1125 17:14:52.531891 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" podStartSLOduration=1.962813258 podStartE2EDuration="2.531871467s" podCreationTimestamp="2025-11-25 17:14:50 +0000 UTC" firstStartedPulling="2025-11-25 17:14:51.406062711 +0000 UTC m=+1666.246204806" lastFinishedPulling="2025-11-25 17:14:51.97512093 +0000 UTC m=+1666.815263015" observedRunningTime="2025-11-25 17:14:52.524767135 +0000 UTC m=+1667.364909230" watchObservedRunningTime="2025-11-25 17:14:52.531871467 +0000 UTC m=+1667.372013572" Nov 25 17:14:56 crc kubenswrapper[4812]: I1125 17:14:56.541150 4812 generic.go:334] "Generic (PLEG): container finished" podID="1abf20b0-08ce-41f6-bef8-f05278504d3e" containerID="3d9833a6b5030da3335af7b4a4871113fe6616049684cda574a47844932dd9ed" exitCode=0 Nov 25 17:14:56 crc kubenswrapper[4812]: I1125 17:14:56.541501 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" event={"ID":"1abf20b0-08ce-41f6-bef8-f05278504d3e","Type":"ContainerDied","Data":"3d9833a6b5030da3335af7b4a4871113fe6616049684cda574a47844932dd9ed"} Nov 25 
17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.046199 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8k7d7"] Nov 25 17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.055373 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-j4kgx"] Nov 25 17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.065166 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-j4kgx"] Nov 25 17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.075608 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8k7d7"] Nov 25 17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.842956 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7" path="/var/lib/kubelet/pods/b3e4a3f4-130a-4ebd-bdaf-8a1a897328d7/volumes" Nov 25 17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.844307 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb88081d-3888-485c-8105-d3ab11630457" path="/var/lib/kubelet/pods/bb88081d-3888-485c-8105-d3ab11630457/volumes" Nov 25 17:14:57 crc kubenswrapper[4812]: I1125 17:14:57.960766 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.010457 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-inventory\") pod \"1abf20b0-08ce-41f6-bef8-f05278504d3e\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.010606 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-ssh-key\") pod \"1abf20b0-08ce-41f6-bef8-f05278504d3e\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.010705 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j95l7\" (UniqueName: \"kubernetes.io/projected/1abf20b0-08ce-41f6-bef8-f05278504d3e-kube-api-access-j95l7\") pod \"1abf20b0-08ce-41f6-bef8-f05278504d3e\" (UID: \"1abf20b0-08ce-41f6-bef8-f05278504d3e\") " Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.023394 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1abf20b0-08ce-41f6-bef8-f05278504d3e-kube-api-access-j95l7" (OuterVolumeSpecName: "kube-api-access-j95l7") pod "1abf20b0-08ce-41f6-bef8-f05278504d3e" (UID: "1abf20b0-08ce-41f6-bef8-f05278504d3e"). InnerVolumeSpecName "kube-api-access-j95l7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.040733 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-t5rs4"] Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.046865 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-inventory" (OuterVolumeSpecName: "inventory") pod "1abf20b0-08ce-41f6-bef8-f05278504d3e" (UID: "1abf20b0-08ce-41f6-bef8-f05278504d3e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.049047 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1abf20b0-08ce-41f6-bef8-f05278504d3e" (UID: "1abf20b0-08ce-41f6-bef8-f05278504d3e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.050409 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-t5rs4"] Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.113428 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.113800 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1abf20b0-08ce-41f6-bef8-f05278504d3e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.113818 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j95l7\" (UniqueName: \"kubernetes.io/projected/1abf20b0-08ce-41f6-bef8-f05278504d3e-kube-api-access-j95l7\") on node \"crc\" DevicePath \"\"" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.564118 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" event={"ID":"1abf20b0-08ce-41f6-bef8-f05278504d3e","Type":"ContainerDied","Data":"c33b955576578954ab1e6e770bc3266913d6f6ecf55f7d9d0b885244fff46369"} Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.564165 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c33b955576578954ab1e6e770bc3266913d6f6ecf55f7d9d0b885244fff46369" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.564215 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.626765 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6"] Nov 25 17:14:58 crc kubenswrapper[4812]: E1125 17:14:58.627109 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1abf20b0-08ce-41f6-bef8-f05278504d3e" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.627126 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="1abf20b0-08ce-41f6-bef8-f05278504d3e" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.627312 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="1abf20b0-08ce-41f6-bef8-f05278504d3e" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.627885 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.631553 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.633109 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.633329 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.633342 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.638335 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6"] Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.724110 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff5sk\" (UniqueName: \"kubernetes.io/projected/dc406ce2-c683-4b49-97d4-960f2781afd2-kube-api-access-ff5sk\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.724190 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.724302 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.825856 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.825931 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff5sk\" (UniqueName: \"kubernetes.io/projected/dc406ce2-c683-4b49-97d4-960f2781afd2-kube-api-access-ff5sk\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.825994 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" 
(UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.831036 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.833344 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.843132 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff5sk\" (UniqueName: \"kubernetes.io/projected/dc406ce2-c683-4b49-97d4-960f2781afd2-kube-api-access-ff5sk\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:58 crc kubenswrapper[4812]: I1125 17:14:58.946050 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.419166 4812 scope.go:117] "RemoveContainer" containerID="ad7559301665deafaf1f25165010182b34f3f7e759e0f0a19006c94e8d981908" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.438878 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6"] Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.460369 4812 scope.go:117] "RemoveContainer" containerID="c5c80dc3c209e5141991b6c4de7812add55f88c1162c7e2772aa0b2f9bf77918" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.520784 4812 scope.go:117] "RemoveContainer" containerID="a75baceb301573ab7802e7e7cc3342ff61ca92b4781545fc53200578e03318ad" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.545783 4812 scope.go:117] "RemoveContainer" containerID="bb01911dc1d7950253380ece32fe4578f6aaeb8137d0f039a8a42d48d4fe820f" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.562479 4812 scope.go:117] "RemoveContainer" containerID="021f14bfccfbb38ca1b21a11feadfd808345ca858a903d9605536976645bdb6e" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.574460 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" event={"ID":"dc406ce2-c683-4b49-97d4-960f2781afd2","Type":"ContainerStarted","Data":"4ad13b3aec22c7bcd269084293bd017200c290d96734649191939ca18b77adf9"} Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.582399 4812 scope.go:117] "RemoveContainer" containerID="319bbb71a6231da31df1197c5c410bd7a43dae2e9d7adf6316a9491e4db366ec" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.606908 4812 scope.go:117] "RemoveContainer" containerID="4fa2c39f8ad70fc602ed1cd99aaaa438049bca404d0bde3c32d9f0525f10e667" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.626406 4812 scope.go:117] "RemoveContainer" 
containerID="12c65c0b0c807733b0334461386820f6982dee7654077de6ea6b529bbe33c74c" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.653401 4812 scope.go:117] "RemoveContainer" containerID="9aa82f4b758e0c1d98cd7c211d55a1373488e8f2b352f694a96993aad2d8aab8" Nov 25 17:14:59 crc kubenswrapper[4812]: I1125 17:14:59.842653 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f63bc45-88e5-4074-b40d-a2741fa63339" path="/var/lib/kubelet/pods/3f63bc45-88e5-4074-b40d-a2741fa63339/volumes" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.154649 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb"] Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.156068 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.159108 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.159249 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.181791 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb"] Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.252644 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6w4v\" (UniqueName: \"kubernetes.io/projected/a83003d5-18a9-482d-8348-b4e3995a8db3-kube-api-access-g6w4v\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.252688 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a83003d5-18a9-482d-8348-b4e3995a8db3-config-volume\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.252806 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a83003d5-18a9-482d-8348-b4e3995a8db3-secret-volume\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.354355 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a83003d5-18a9-482d-8348-b4e3995a8db3-secret-volume\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.354482 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6w4v\" (UniqueName: \"kubernetes.io/projected/a83003d5-18a9-482d-8348-b4e3995a8db3-kube-api-access-g6w4v\") pod 
\"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.354505 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a83003d5-18a9-482d-8348-b4e3995a8db3-config-volume\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.356388 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a83003d5-18a9-482d-8348-b4e3995a8db3-config-volume\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.360156 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a83003d5-18a9-482d-8348-b4e3995a8db3-secret-volume\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.378398 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6w4v\" (UniqueName: \"kubernetes.io/projected/a83003d5-18a9-482d-8348-b4e3995a8db3-kube-api-access-g6w4v\") pod \"collect-profiles-29401515-lb6pb\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.476358 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.596435 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" event={"ID":"dc406ce2-c683-4b49-97d4-960f2781afd2","Type":"ContainerStarted","Data":"ab79174a1b7656220f755fc746961c6e91846695342237af85e771b4f7cfd6cf"} Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.620351 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" podStartSLOduration=2.163724824 podStartE2EDuration="2.62033092s" podCreationTimestamp="2025-11-25 17:14:58 +0000 UTC" firstStartedPulling="2025-11-25 17:14:59.470480074 +0000 UTC m=+1674.310622159" lastFinishedPulling="2025-11-25 17:14:59.92708616 +0000 UTC m=+1674.767228255" observedRunningTime="2025-11-25 17:15:00.615830108 +0000 UTC m=+1675.455972203" watchObservedRunningTime="2025-11-25 17:15:00.62033092 +0000 UTC m=+1675.460473015" Nov 25 17:15:00 crc kubenswrapper[4812]: I1125 17:15:00.925469 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb"] Nov 25 17:15:01 crc kubenswrapper[4812]: I1125 17:15:01.606185 4812 generic.go:334] "Generic (PLEG): container finished" podID="a83003d5-18a9-482d-8348-b4e3995a8db3" containerID="e85de82af017d7961d1942a62302bddb2fbc6a8a96d25f6ba11a7298968e5715" exitCode=0 Nov 25 17:15:01 crc kubenswrapper[4812]: I1125 17:15:01.606689 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" event={"ID":"a83003d5-18a9-482d-8348-b4e3995a8db3","Type":"ContainerDied","Data":"e85de82af017d7961d1942a62302bddb2fbc6a8a96d25f6ba11a7298968e5715"} Nov 25 17:15:01 crc kubenswrapper[4812]: I1125 17:15:01.606718 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" event={"ID":"a83003d5-18a9-482d-8348-b4e3995a8db3","Type":"ContainerStarted","Data":"03b86bcccba81fe3e3c7f280e34a79ac2e5714fe70cc8f3038ad9afd9ef891d7"} Nov 25 17:15:01 crc kubenswrapper[4812]: I1125 17:15:01.831648 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:15:01 crc kubenswrapper[4812]: E1125 17:15:01.831868 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:15:02 crc kubenswrapper[4812]: I1125 17:15:02.924949 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.018011 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a83003d5-18a9-482d-8348-b4e3995a8db3-secret-volume\") pod \"a83003d5-18a9-482d-8348-b4e3995a8db3\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.018142 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6w4v\" (UniqueName: \"kubernetes.io/projected/a83003d5-18a9-482d-8348-b4e3995a8db3-kube-api-access-g6w4v\") pod \"a83003d5-18a9-482d-8348-b4e3995a8db3\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.018381 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a83003d5-18a9-482d-8348-b4e3995a8db3-config-volume\") pod \"a83003d5-18a9-482d-8348-b4e3995a8db3\" (UID: \"a83003d5-18a9-482d-8348-b4e3995a8db3\") " Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.019263 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a83003d5-18a9-482d-8348-b4e3995a8db3-config-volume" (OuterVolumeSpecName: "config-volume") pod "a83003d5-18a9-482d-8348-b4e3995a8db3" (UID: "a83003d5-18a9-482d-8348-b4e3995a8db3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.023354 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83003d5-18a9-482d-8348-b4e3995a8db3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a83003d5-18a9-482d-8348-b4e3995a8db3" (UID: "a83003d5-18a9-482d-8348-b4e3995a8db3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.029904 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a83003d5-18a9-482d-8348-b4e3995a8db3-kube-api-access-g6w4v" (OuterVolumeSpecName: "kube-api-access-g6w4v") pod "a83003d5-18a9-482d-8348-b4e3995a8db3" (UID: "a83003d5-18a9-482d-8348-b4e3995a8db3"). InnerVolumeSpecName "kube-api-access-g6w4v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.120479 4812 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a83003d5-18a9-482d-8348-b4e3995a8db3-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.120563 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6w4v\" (UniqueName: \"kubernetes.io/projected/a83003d5-18a9-482d-8348-b4e3995a8db3-kube-api-access-g6w4v\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.120579 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a83003d5-18a9-482d-8348-b4e3995a8db3-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.626735 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" event={"ID":"a83003d5-18a9-482d-8348-b4e3995a8db3","Type":"ContainerDied","Data":"03b86bcccba81fe3e3c7f280e34a79ac2e5714fe70cc8f3038ad9afd9ef891d7"} Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.626838 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03b86bcccba81fe3e3c7f280e34a79ac2e5714fe70cc8f3038ad9afd9ef891d7" Nov 25 17:15:03 crc kubenswrapper[4812]: I1125 17:15:03.626836 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb" Nov 25 17:15:08 crc kubenswrapper[4812]: I1125 17:15:08.028908 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-f9kvr"] Nov 25 17:15:08 crc kubenswrapper[4812]: I1125 17:15:08.038382 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-f9kvr"] Nov 25 17:15:09 crc kubenswrapper[4812]: I1125 17:15:09.847448 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="046e94e3-a63c-490b-8fb1-db6592742208" path="/var/lib/kubelet/pods/046e94e3-a63c-490b-8fb1-db6592742208/volumes" Nov 25 17:15:14 crc kubenswrapper[4812]: I1125 17:15:14.832131 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:15:14 crc kubenswrapper[4812]: E1125 17:15:14.832886 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:15:25 crc kubenswrapper[4812]: I1125 17:15:25.045784 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-xz5j9"] Nov 25 17:15:25 crc kubenswrapper[4812]: I1125 17:15:25.056403 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-xz5j9"] Nov 25 17:15:25 crc kubenswrapper[4812]: I1125 17:15:25.849181 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee5fe32b-eefd-4847-a053-b72c9f06e3b1" path="/var/lib/kubelet/pods/ee5fe32b-eefd-4847-a053-b72c9f06e3b1/volumes" Nov 25 17:15:28 crc kubenswrapper[4812]: I1125 17:15:28.831834 4812 scope.go:117] "RemoveContainer" 
containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:15:28 crc kubenswrapper[4812]: E1125 17:15:28.832311 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:15:43 crc kubenswrapper[4812]: I1125 17:15:43.842424 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:15:43 crc kubenswrapper[4812]: E1125 17:15:43.843446 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.047270 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cff9-account-create-772bb"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.058653 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-2tnh5"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.066708 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-8jrgd"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.077783 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cff9-account-create-772bb"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.085396 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-2tnh5"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.093184 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-pgm8z"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.102261 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2c67-account-create-2dzrx"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.111590 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-66bc-account-create-9fxvs"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.144457 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2c67-account-create-2dzrx"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.153126 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-66bc-account-create-9fxvs"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.162013 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-8jrgd"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.169220 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-pgm8z"] Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.841921 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b" path="/var/lib/kubelet/pods/0a32a31d-c114-4e2d-a8d8-5a6c0f0e518b/volumes" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.842721 
4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ffbf750-9ce8-454e-a031-eb09ebaa34a5" path="/var/lib/kubelet/pods/3ffbf750-9ce8-454e-a031-eb09ebaa34a5/volumes" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.843332 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64f71340-850a-4cf0-880a-5760dc7e1c4f" path="/var/lib/kubelet/pods/64f71340-850a-4cf0-880a-5760dc7e1c4f/volumes" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.843915 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ba390cc-ed29-4af9-b3b5-582f5c3de736" path="/var/lib/kubelet/pods/6ba390cc-ed29-4af9-b3b5-582f5c3de736/volumes" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.845091 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="941bf985-0c3b-4972-b4fe-182f891de32f" path="/var/lib/kubelet/pods/941bf985-0c3b-4972-b4fe-182f891de32f/volumes" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.845755 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4d2ad77-439d-4039-98b1-f26eadcc542e" path="/var/lib/kubelet/pods/d4d2ad77-439d-4039-98b1-f26eadcc542e/volumes" Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.992443 4812 generic.go:334] "Generic (PLEG): container finished" podID="dc406ce2-c683-4b49-97d4-960f2781afd2" containerID="ab79174a1b7656220f755fc746961c6e91846695342237af85e771b4f7cfd6cf" exitCode=0 Nov 25 17:15:47 crc kubenswrapper[4812]: I1125 17:15:47.992520 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" event={"ID":"dc406ce2-c683-4b49-97d4-960f2781afd2","Type":"ContainerDied","Data":"ab79174a1b7656220f755fc746961c6e91846695342237af85e771b4f7cfd6cf"} Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.382471 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.528993 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ff5sk\" (UniqueName: \"kubernetes.io/projected/dc406ce2-c683-4b49-97d4-960f2781afd2-kube-api-access-ff5sk\") pod \"dc406ce2-c683-4b49-97d4-960f2781afd2\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.529139 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-ssh-key\") pod \"dc406ce2-c683-4b49-97d4-960f2781afd2\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.529208 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-inventory\") pod \"dc406ce2-c683-4b49-97d4-960f2781afd2\" (UID: \"dc406ce2-c683-4b49-97d4-960f2781afd2\") " Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.534623 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc406ce2-c683-4b49-97d4-960f2781afd2-kube-api-access-ff5sk" (OuterVolumeSpecName: "kube-api-access-ff5sk") pod "dc406ce2-c683-4b49-97d4-960f2781afd2" (UID: "dc406ce2-c683-4b49-97d4-960f2781afd2"). InnerVolumeSpecName "kube-api-access-ff5sk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.554106 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-inventory" (OuterVolumeSpecName: "inventory") pod "dc406ce2-c683-4b49-97d4-960f2781afd2" (UID: "dc406ce2-c683-4b49-97d4-960f2781afd2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.558088 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dc406ce2-c683-4b49-97d4-960f2781afd2" (UID: "dc406ce2-c683-4b49-97d4-960f2781afd2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.631650 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ff5sk\" (UniqueName: \"kubernetes.io/projected/dc406ce2-c683-4b49-97d4-960f2781afd2-kube-api-access-ff5sk\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.631703 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:49 crc kubenswrapper[4812]: I1125 17:15:49.631726 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc406ce2-c683-4b49-97d4-960f2781afd2-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.013219 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" event={"ID":"dc406ce2-c683-4b49-97d4-960f2781afd2","Type":"ContainerDied","Data":"4ad13b3aec22c7bcd269084293bd017200c290d96734649191939ca18b77adf9"} Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.013261 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ad13b3aec22c7bcd269084293bd017200c290d96734649191939ca18b77adf9" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.013280 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.077283 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-nfpkd"] Nov 25 17:15:50 crc kubenswrapper[4812]: E1125 17:15:50.077650 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83003d5-18a9-482d-8348-b4e3995a8db3" containerName="collect-profiles" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.077668 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83003d5-18a9-482d-8348-b4e3995a8db3" containerName="collect-profiles" Nov 25 17:15:50 crc kubenswrapper[4812]: E1125 17:15:50.077689 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc406ce2-c683-4b49-97d4-960f2781afd2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.077702 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc406ce2-c683-4b49-97d4-960f2781afd2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.077884 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc406ce2-c683-4b49-97d4-960f2781afd2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.077910 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83003d5-18a9-482d-8348-b4e3995a8db3" containerName="collect-profiles" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.078444 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.080366 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.080842 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.081039 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.081735 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.089243 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-nfpkd"] Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.241424 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x495d\" (UniqueName: \"kubernetes.io/projected/72a6d538-face-4c13-8818-fc6b83371e81-kube-api-access-x495d\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.241585 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 
17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.241791 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.343839 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x495d\" (UniqueName: \"kubernetes.io/projected/72a6d538-face-4c13-8818-fc6b83371e81-kube-api-access-x495d\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.344041 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.344208 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.352747 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.360406 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.363216 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x495d\" (UniqueName: \"kubernetes.io/projected/72a6d538-face-4c13-8818-fc6b83371e81-kube-api-access-x495d\") pod \"ssh-known-hosts-edpm-deployment-nfpkd\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.399462 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:50 crc kubenswrapper[4812]: I1125 17:15:50.918793 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-nfpkd"] Nov 25 17:15:51 crc kubenswrapper[4812]: I1125 17:15:51.035390 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" event={"ID":"72a6d538-face-4c13-8818-fc6b83371e81","Type":"ContainerStarted","Data":"ea61d5d4a8541ba47bd3faba31a876adea1d7c097f4667fd1f692a6e43dfda5d"} Nov 25 17:15:52 crc kubenswrapper[4812]: I1125 17:15:52.045912 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" event={"ID":"72a6d538-face-4c13-8818-fc6b83371e81","Type":"ContainerStarted","Data":"e8a939ace6426592fb9b95880fc998fc42af21d5d088da79b05fd1c461c6fbe8"} Nov 25 17:15:52 crc kubenswrapper[4812]: I1125 17:15:52.072456 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" podStartSLOduration=1.529707001 podStartE2EDuration="2.072438879s" podCreationTimestamp="2025-11-25 17:15:50 +0000 UTC" firstStartedPulling="2025-11-25 17:15:50.92191806 +0000 UTC m=+1725.762060155" lastFinishedPulling="2025-11-25 17:15:51.464649928 +0000 UTC m=+1726.304792033" observedRunningTime="2025-11-25 17:15:52.065377339 +0000 UTC m=+1726.905519434" watchObservedRunningTime="2025-11-25 17:15:52.072438879 +0000 UTC m=+1726.912580974" Nov 25 17:15:56 crc kubenswrapper[4812]: I1125 17:15:56.832373 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:15:56 crc kubenswrapper[4812]: E1125 17:15:56.833147 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:15:58 crc kubenswrapper[4812]: I1125 17:15:58.094004 4812 generic.go:334] "Generic (PLEG): container finished" podID="72a6d538-face-4c13-8818-fc6b83371e81" containerID="e8a939ace6426592fb9b95880fc998fc42af21d5d088da79b05fd1c461c6fbe8" exitCode=0 Nov 25 17:15:58 crc kubenswrapper[4812]: I1125 17:15:58.094059 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" event={"ID":"72a6d538-face-4c13-8818-fc6b83371e81","Type":"ContainerDied","Data":"e8a939ace6426592fb9b95880fc998fc42af21d5d088da79b05fd1c461c6fbe8"} Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.519042 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.621861 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x495d\" (UniqueName: \"kubernetes.io/projected/72a6d538-face-4c13-8818-fc6b83371e81-kube-api-access-x495d\") pod \"72a6d538-face-4c13-8818-fc6b83371e81\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.621961 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-inventory-0\") pod \"72a6d538-face-4c13-8818-fc6b83371e81\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.622102 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-ssh-key-openstack-edpm-ipam\") pod \"72a6d538-face-4c13-8818-fc6b83371e81\" (UID: \"72a6d538-face-4c13-8818-fc6b83371e81\") " Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.628061 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72a6d538-face-4c13-8818-fc6b83371e81-kube-api-access-x495d" (OuterVolumeSpecName: "kube-api-access-x495d") pod "72a6d538-face-4c13-8818-fc6b83371e81" (UID: "72a6d538-face-4c13-8818-fc6b83371e81"). InnerVolumeSpecName "kube-api-access-x495d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.648169 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "72a6d538-face-4c13-8818-fc6b83371e81" (UID: "72a6d538-face-4c13-8818-fc6b83371e81"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.650815 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "72a6d538-face-4c13-8818-fc6b83371e81" (UID: "72a6d538-face-4c13-8818-fc6b83371e81"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.723622 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x495d\" (UniqueName: \"kubernetes.io/projected/72a6d538-face-4c13-8818-fc6b83371e81-kube-api-access-x495d\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.723667 4812 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.723686 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/72a6d538-face-4c13-8818-fc6b83371e81-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.819489 4812 scope.go:117] "RemoveContainer" containerID="16f8e292b902a2f39c821f6cd59071fe4f7c729ff120376c24b25bf887258729" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.848782 4812 scope.go:117] "RemoveContainer" containerID="8ea359b43672d054928c8e68d05f43fa720edd480d3734062ce481f2c051d289" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.893513 4812 scope.go:117] "RemoveContainer" containerID="7ede419f7de410b3b355315b75aa48ca8908060061bd4733ab07a85e39aefab6" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.911789 4812 scope.go:117] "RemoveContainer" containerID="cf259ffa2d6cfff5e626a60c9259f7b88c8610d83737e49f48dc219365ce6512" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.928973 4812 scope.go:117] "RemoveContainer" containerID="749d71bc9d70c64e1e650dd6acc3ee5198ff40adf1d41c141775b91a57048ead" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.956676 4812 scope.go:117] "RemoveContainer" containerID="5bba6f465dbc381cff57a71703a8e3ef80f656c6591947820c0797452ab14730" Nov 25 17:15:59 crc kubenswrapper[4812]: I1125 17:15:59.973633 4812 scope.go:117] "RemoveContainer" containerID="16b082e6594e10bff3c6e967d7fb7d3b81f5eb29c1cb6fe7e0f42347cb6a4353" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.004893 4812 scope.go:117] "RemoveContainer" containerID="c11d340edffb8007ba2bef28b7c0676a9322cee13cd7905a314cf4d9553da4bc" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.022227 4812 scope.go:117] "RemoveContainer" containerID="ddc9240f16a4c291bc708104e69390f49d6d05f043cd815df2fde700fb198348" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.115200 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" event={"ID":"72a6d538-face-4c13-8818-fc6b83371e81","Type":"ContainerDied","Data":"ea61d5d4a8541ba47bd3faba31a876adea1d7c097f4667fd1f692a6e43dfda5d"} Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.115221 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-nfpkd" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.115244 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea61d5d4a8541ba47bd3faba31a876adea1d7c097f4667fd1f692a6e43dfda5d" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.176292 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z"] Nov 25 17:16:00 crc kubenswrapper[4812]: E1125 17:16:00.176754 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72a6d538-face-4c13-8818-fc6b83371e81" containerName="ssh-known-hosts-edpm-deployment" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.176766 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="72a6d538-face-4c13-8818-fc6b83371e81" containerName="ssh-known-hosts-edpm-deployment" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.176941 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="72a6d538-face-4c13-8818-fc6b83371e81" containerName="ssh-known-hosts-edpm-deployment" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.177550 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.181827 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.182052 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.182081 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.182099 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.188468 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z"] Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.234570 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.234910 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.234989 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q5l4\" (UniqueName: \"kubernetes.io/projected/de33fc0f-e990-4ac0-b80b-86732e2f9298-kube-api-access-4q5l4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.336644 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.336696 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q5l4\" (UniqueName: \"kubernetes.io/projected/de33fc0f-e990-4ac0-b80b-86732e2f9298-kube-api-access-4q5l4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.336766 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.340503 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.340724 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.352415 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q5l4\" (UniqueName: \"kubernetes.io/projected/de33fc0f-e990-4ac0-b80b-86732e2f9298-kube-api-access-4q5l4\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-q5z6z\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:00 crc kubenswrapper[4812]: I1125 17:16:00.499628 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:01 crc kubenswrapper[4812]: I1125 17:16:01.008872 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z"] Nov 25 17:16:01 crc kubenswrapper[4812]: I1125 17:16:01.141859 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" event={"ID":"de33fc0f-e990-4ac0-b80b-86732e2f9298","Type":"ContainerStarted","Data":"da1f3dceae4ac9e0e021fd424234894b968fa0ce6c634a8d0ba4d187b73851fb"} Nov 25 17:16:02 crc kubenswrapper[4812]: I1125 17:16:02.158110 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" event={"ID":"de33fc0f-e990-4ac0-b80b-86732e2f9298","Type":"ContainerStarted","Data":"01cb517fafac99171c64e593506726af83c10df3bb73c3aa6f42fd4bd97aa78d"} Nov 25 17:16:02 crc kubenswrapper[4812]: I1125 17:16:02.179770 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" podStartSLOduration=1.788408837 podStartE2EDuration="2.179750551s" podCreationTimestamp="2025-11-25 17:16:00 +0000 UTC" firstStartedPulling="2025-11-25 17:16:01.015490871 +0000 UTC m=+1735.855632966" lastFinishedPulling="2025-11-25 17:16:01.406832585 +0000 UTC m=+1736.246974680" observedRunningTime="2025-11-25 17:16:02.177250083 +0000 UTC m=+1737.017392198" watchObservedRunningTime="2025-11-25 17:16:02.179750551 +0000 UTC m=+1737.019892646" Nov 25 17:16:10 crc kubenswrapper[4812]: I1125 17:16:10.232140 4812 generic.go:334] "Generic (PLEG): container finished" podID="de33fc0f-e990-4ac0-b80b-86732e2f9298" containerID="01cb517fafac99171c64e593506726af83c10df3bb73c3aa6f42fd4bd97aa78d" exitCode=0 Nov 25 17:16:10 crc kubenswrapper[4812]: I1125 17:16:10.232219 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" event={"ID":"de33fc0f-e990-4ac0-b80b-86732e2f9298","Type":"ContainerDied","Data":"01cb517fafac99171c64e593506726af83c10df3bb73c3aa6f42fd4bd97aa78d"} Nov 25 17:16:10 crc kubenswrapper[4812]: I1125 17:16:10.832298 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:16:10 crc kubenswrapper[4812]: E1125 17:16:10.832748 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.678323 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.749809 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-inventory\") pod \"de33fc0f-e990-4ac0-b80b-86732e2f9298\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.749861 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q5l4\" (UniqueName: \"kubernetes.io/projected/de33fc0f-e990-4ac0-b80b-86732e2f9298-kube-api-access-4q5l4\") pod \"de33fc0f-e990-4ac0-b80b-86732e2f9298\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.749917 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-ssh-key\") pod \"de33fc0f-e990-4ac0-b80b-86732e2f9298\" (UID: \"de33fc0f-e990-4ac0-b80b-86732e2f9298\") " Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.757227 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de33fc0f-e990-4ac0-b80b-86732e2f9298-kube-api-access-4q5l4" (OuterVolumeSpecName: "kube-api-access-4q5l4") pod "de33fc0f-e990-4ac0-b80b-86732e2f9298" (UID: "de33fc0f-e990-4ac0-b80b-86732e2f9298"). InnerVolumeSpecName "kube-api-access-4q5l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.779166 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "de33fc0f-e990-4ac0-b80b-86732e2f9298" (UID: "de33fc0f-e990-4ac0-b80b-86732e2f9298"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.799278 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-inventory" (OuterVolumeSpecName: "inventory") pod "de33fc0f-e990-4ac0-b80b-86732e2f9298" (UID: "de33fc0f-e990-4ac0-b80b-86732e2f9298"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.852017 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.852255 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de33fc0f-e990-4ac0-b80b-86732e2f9298-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:16:11 crc kubenswrapper[4812]: I1125 17:16:11.852315 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q5l4\" (UniqueName: \"kubernetes.io/projected/de33fc0f-e990-4ac0-b80b-86732e2f9298-kube-api-access-4q5l4\") on node \"crc\" DevicePath \"\"" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.253203 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" event={"ID":"de33fc0f-e990-4ac0-b80b-86732e2f9298","Type":"ContainerDied","Data":"da1f3dceae4ac9e0e021fd424234894b968fa0ce6c634a8d0ba4d187b73851fb"} Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.253241 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da1f3dceae4ac9e0e021fd424234894b968fa0ce6c634a8d0ba4d187b73851fb" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.253313 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.394860 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv"] Nov 25 17:16:12 crc kubenswrapper[4812]: E1125 17:16:12.395326 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de33fc0f-e990-4ac0-b80b-86732e2f9298" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.395349 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="de33fc0f-e990-4ac0-b80b-86732e2f9298" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.395579 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="de33fc0f-e990-4ac0-b80b-86732e2f9298" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.396308 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.398649 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.398884 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.399734 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.399790 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.404815 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv"] Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.460072 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.460137 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.460224 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fl85\" (UniqueName: \"kubernetes.io/projected/b963aa9e-100f-48a4-afd6-b7b84649baa1-kube-api-access-4fl85\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.562655 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.562706 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.562781 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fl85\" (UniqueName: \"kubernetes.io/projected/b963aa9e-100f-48a4-afd6-b7b84649baa1-kube-api-access-4fl85\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: 
\"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.566748 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.570727 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.586562 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fl85\" (UniqueName: \"kubernetes.io/projected/b963aa9e-100f-48a4-afd6-b7b84649baa1-kube-api-access-4fl85\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:12 crc kubenswrapper[4812]: I1125 17:16:12.714901 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:13 crc kubenswrapper[4812]: I1125 17:16:13.222666 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv"] Nov 25 17:16:13 crc kubenswrapper[4812]: W1125 17:16:13.223183 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb963aa9e_100f_48a4_afd6_b7b84649baa1.slice/crio-438dcbf317b5770b46c42d1256ea3291a49d8c6236db5517967017d79354afd6 WatchSource:0}: Error finding container 438dcbf317b5770b46c42d1256ea3291a49d8c6236db5517967017d79354afd6: Status 404 returned error can't find the container with id 438dcbf317b5770b46c42d1256ea3291a49d8c6236db5517967017d79354afd6 Nov 25 17:16:13 crc kubenswrapper[4812]: I1125 17:16:13.271170 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" event={"ID":"b963aa9e-100f-48a4-afd6-b7b84649baa1","Type":"ContainerStarted","Data":"438dcbf317b5770b46c42d1256ea3291a49d8c6236db5517967017d79354afd6"} Nov 25 17:16:14 crc kubenswrapper[4812]: I1125 17:16:14.280869 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" event={"ID":"b963aa9e-100f-48a4-afd6-b7b84649baa1","Type":"ContainerStarted","Data":"5f59b7f13edac07b4be13a573a40afaa4b0c6f16fd798ba9b3c95d95c185aa2f"} Nov 25 17:16:14 crc kubenswrapper[4812]: I1125 17:16:14.307618 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" podStartSLOduration=1.856812847 podStartE2EDuration="2.307600222s" podCreationTimestamp="2025-11-25 17:16:12 +0000 UTC" firstStartedPulling="2025-11-25 17:16:13.225910025 +0000 UTC m=+1748.066052120" lastFinishedPulling="2025-11-25 17:16:13.6766974 +0000 UTC m=+1748.516839495" observedRunningTime="2025-11-25 17:16:14.30268297 +0000 UTC m=+1749.142825085" 
watchObservedRunningTime="2025-11-25 17:16:14.307600222 +0000 UTC m=+1749.147742317" Nov 25 17:16:21 crc kubenswrapper[4812]: I1125 17:16:21.047093 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mg5b8"] Nov 25 17:16:21 crc kubenswrapper[4812]: I1125 17:16:21.056678 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-mg5b8"] Nov 25 17:16:21 crc kubenswrapper[4812]: I1125 17:16:21.842209 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d944d86b-9bc4-4360-89ce-07220fc618ea" path="/var/lib/kubelet/pods/d944d86b-9bc4-4360-89ce-07220fc618ea/volumes" Nov 25 17:16:22 crc kubenswrapper[4812]: I1125 17:16:22.832105 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:16:22 crc kubenswrapper[4812]: E1125 17:16:22.832411 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:16:23 crc kubenswrapper[4812]: I1125 17:16:23.368330 4812 generic.go:334] "Generic (PLEG): container finished" podID="b963aa9e-100f-48a4-afd6-b7b84649baa1" containerID="5f59b7f13edac07b4be13a573a40afaa4b0c6f16fd798ba9b3c95d95c185aa2f" exitCode=0 Nov 25 17:16:23 crc kubenswrapper[4812]: I1125 17:16:23.368462 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" event={"ID":"b963aa9e-100f-48a4-afd6-b7b84649baa1","Type":"ContainerDied","Data":"5f59b7f13edac07b4be13a573a40afaa4b0c6f16fd798ba9b3c95d95c185aa2f"} Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.758514 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.783376 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-inventory\") pod \"b963aa9e-100f-48a4-afd6-b7b84649baa1\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.783901 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fl85\" (UniqueName: \"kubernetes.io/projected/b963aa9e-100f-48a4-afd6-b7b84649baa1-kube-api-access-4fl85\") pod \"b963aa9e-100f-48a4-afd6-b7b84649baa1\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.783935 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-ssh-key\") pod \"b963aa9e-100f-48a4-afd6-b7b84649baa1\" (UID: \"b963aa9e-100f-48a4-afd6-b7b84649baa1\") " Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.791769 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b963aa9e-100f-48a4-afd6-b7b84649baa1-kube-api-access-4fl85" (OuterVolumeSpecName: "kube-api-access-4fl85") pod "b963aa9e-100f-48a4-afd6-b7b84649baa1" (UID: "b963aa9e-100f-48a4-afd6-b7b84649baa1"). InnerVolumeSpecName "kube-api-access-4fl85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.809882 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-inventory" (OuterVolumeSpecName: "inventory") pod "b963aa9e-100f-48a4-afd6-b7b84649baa1" (UID: "b963aa9e-100f-48a4-afd6-b7b84649baa1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.810246 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b963aa9e-100f-48a4-afd6-b7b84649baa1" (UID: "b963aa9e-100f-48a4-afd6-b7b84649baa1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.886254 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fl85\" (UniqueName: \"kubernetes.io/projected/b963aa9e-100f-48a4-afd6-b7b84649baa1-kube-api-access-4fl85\") on node \"crc\" DevicePath \"\"" Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.886385 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:16:24 crc kubenswrapper[4812]: I1125 17:16:24.886401 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b963aa9e-100f-48a4-afd6-b7b84649baa1-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:16:25 crc kubenswrapper[4812]: I1125 17:16:25.387327 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" event={"ID":"b963aa9e-100f-48a4-afd6-b7b84649baa1","Type":"ContainerDied","Data":"438dcbf317b5770b46c42d1256ea3291a49d8c6236db5517967017d79354afd6"} Nov 25 17:16:25 crc kubenswrapper[4812]: I1125 17:16:25.387590 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="438dcbf317b5770b46c42d1256ea3291a49d8c6236db5517967017d79354afd6" Nov 25 17:16:25 crc kubenswrapper[4812]: I1125 17:16:25.387388 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv" Nov 25 17:16:37 crc kubenswrapper[4812]: I1125 17:16:37.831594 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:16:37 crc kubenswrapper[4812]: E1125 17:16:37.833380 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:16:43 crc kubenswrapper[4812]: I1125 17:16:43.046269 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-cr76j"] Nov 25 17:16:43 crc kubenswrapper[4812]: I1125 17:16:43.053982 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-cr76j"] Nov 25 17:16:43 crc kubenswrapper[4812]: I1125 17:16:43.848088 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12587876-fcb8-487a-a197-0696ac90f57d" path="/var/lib/kubelet/pods/12587876-fcb8-487a-a197-0696ac90f57d/volumes" Nov 25 17:16:49 crc kubenswrapper[4812]: I1125 17:16:49.034626 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2kj2k"] Nov 25 17:16:49 crc kubenswrapper[4812]: I1125 17:16:49.046926 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-2kj2k"] Nov 25 17:16:49 crc kubenswrapper[4812]: I1125 17:16:49.841553 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b37c593-c2f5-4304-bfeb-820d518bce9f" path="/var/lib/kubelet/pods/0b37c593-c2f5-4304-bfeb-820d518bce9f/volumes" Nov 25 17:16:52 crc kubenswrapper[4812]: I1125 17:16:52.831876 4812 scope.go:117] "RemoveContainer" 
containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:16:52 crc kubenswrapper[4812]: E1125 17:16:52.832370 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:17:00 crc kubenswrapper[4812]: I1125 17:17:00.202445 4812 scope.go:117] "RemoveContainer" containerID="8ad5d89dd345e7340100873cdfdba709698c08cccb9980b09e786f24386e5949" Nov 25 17:17:00 crc kubenswrapper[4812]: I1125 17:17:00.229588 4812 scope.go:117] "RemoveContainer" containerID="a2a5f55ae19d352942c7ceb265494110ba04e0647736567a174f341cfda71271" Nov 25 17:17:00 crc kubenswrapper[4812]: I1125 17:17:00.279547 4812 scope.go:117] "RemoveContainer" containerID="de61b62cf3c6fbc1ac37cc3446a1c1d41accc0290a006a6d26dbae2ed0272711" Nov 25 17:17:00 crc kubenswrapper[4812]: I1125 17:17:00.317424 4812 scope.go:117] "RemoveContainer" containerID="05e90a883068b3bbd7169aa3a5de41d93dd52038953f351098c8e81af14c0bf9" Nov 25 17:17:03 crc kubenswrapper[4812]: I1125 17:17:03.832013 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:17:03 crc kubenswrapper[4812]: E1125 17:17:03.832843 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:17:18 crc kubenswrapper[4812]: I1125 17:17:18.105287 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:17:18 crc kubenswrapper[4812]: E1125 17:17:18.106145 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:17:28 crc kubenswrapper[4812]: I1125 17:17:28.041493 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-pwkn8"] Nov 25 17:17:28 crc kubenswrapper[4812]: I1125 17:17:28.050131 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-pwkn8"] Nov 25 17:17:29 crc kubenswrapper[4812]: I1125 17:17:29.831841 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:17:29 crc kubenswrapper[4812]: E1125 17:17:29.832499 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:17:29 crc kubenswrapper[4812]: I1125 17:17:29.844978 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6b3ec00-4b31-4c04-b90a-58d161c57811" path="/var/lib/kubelet/pods/c6b3ec00-4b31-4c04-b90a-58d161c57811/volumes" Nov 25 17:17:41 crc kubenswrapper[4812]: I1125 17:17:41.831245 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:17:41 crc kubenswrapper[4812]: E1125 17:17:41.831884 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:17:55 crc kubenswrapper[4812]: I1125 17:17:55.836826 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:17:55 crc kubenswrapper[4812]: E1125 17:17:55.837569 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:18:00 crc kubenswrapper[4812]: I1125 17:18:00.417588 4812 scope.go:117] "RemoveContainer" containerID="3e33fcdfbd7ac14e9fb59ced95c1013e9a8947b1f09d4f25e7c2ba22fd21cb38" Nov 25 17:18:07 crc kubenswrapper[4812]: I1125 17:18:07.832281 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:18:07 crc kubenswrapper[4812]: E1125 17:18:07.833256 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:18:20 crc kubenswrapper[4812]: I1125 17:18:20.831685 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:18:20 crc kubenswrapper[4812]: E1125 17:18:20.833457 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:18:32 crc kubenswrapper[4812]: I1125 17:18:32.831370 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:18:32 crc kubenswrapper[4812]: E1125 17:18:32.832778 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:18:45 crc kubenswrapper[4812]: I1125 17:18:45.839053 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:18:45 crc kubenswrapper[4812]: E1125 17:18:45.839829 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:18:56 crc kubenswrapper[4812]: I1125 17:18:56.832035 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:18:56 crc kubenswrapper[4812]: E1125 17:18:56.832726 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:19:08 crc kubenswrapper[4812]: I1125 17:19:08.832060 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:19:09 crc kubenswrapper[4812]: I1125 17:19:09.098281 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"1ce2565d170db8a46eee60e79be5aef9c670f9e94c93580922535f0b2f9ddf57"} Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.092063 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"] Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.102915 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"] Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.110462 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m"] Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.118509 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-855kz"] Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.126911 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-xqw5m"] Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.134713 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-vxkbj"] Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.142427 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8"] Nov 25 
17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.150523 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.157771 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.165010 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.171441 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.178615 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.186855 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-nfpkd"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.196332 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p6pfv"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.203973 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-k8jv6"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.211592 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-nfpkd"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.218686 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qsf5l"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.225911 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-hdhx8"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.234460 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-q5z6z"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.243846 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-qxh2k"]
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.841826 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1abf20b0-08ce-41f6-bef8-f05278504d3e" path="/var/lib/kubelet/pods/1abf20b0-08ce-41f6-bef8-f05278504d3e/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.842923 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dfa6352-9a39-4a26-9289-e861e371b922" path="/var/lib/kubelet/pods/3dfa6352-9a39-4a26-9289-e861e371b922/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.843567 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72a6d538-face-4c13-8818-fc6b83371e81" path="/var/lib/kubelet/pods/72a6d538-face-4c13-8818-fc6b83371e81/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.844225 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96745b0b-7343-4676-b692-46a52e6cfcb4" path="/var/lib/kubelet/pods/96745b0b-7343-4676-b692-46a52e6cfcb4/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.845186 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9771507e-2a96-4539-ac2f-00a418f64803" path="/var/lib/kubelet/pods/9771507e-2a96-4539-ac2f-00a418f64803/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.845697 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b963aa9e-100f-48a4-afd6-b7b84649baa1" path="/var/lib/kubelet/pods/b963aa9e-100f-48a4-afd6-b7b84649baa1/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.846379 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71" path="/var/lib/kubelet/pods/d0fd4a0b-7e90-438d-89cf-f3dbc3da2b71/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.847307 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc406ce2-c683-4b49-97d4-960f2781afd2" path="/var/lib/kubelet/pods/dc406ce2-c683-4b49-97d4-960f2781afd2/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.847802 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de33fc0f-e990-4ac0-b80b-86732e2f9298" path="/var/lib/kubelet/pods/de33fc0f-e990-4ac0-b80b-86732e2f9298/volumes"
Nov 25 17:20:03 crc kubenswrapper[4812]: I1125 17:20:03.848423 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dec7f299-d822-494a-9a86-351502541b77" path="/var/lib/kubelet/pods/dec7f299-d822-494a-9a86-351502541b77/volumes"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.132180 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"]
Nov 25 17:20:09 crc kubenswrapper[4812]: E1125 17:20:09.133101 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b963aa9e-100f-48a4-afd6-b7b84649baa1" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.133117 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="b963aa9e-100f-48a4-afd6-b7b84649baa1" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.133331 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="b963aa9e-100f-48a4-afd6-b7b84649baa1" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.134051 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.136686 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.136800 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.136818 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.136867 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.139950 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.143518 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"]
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.264748 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.265081 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.265117 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kkjv\" (UniqueName: \"kubernetes.io/projected/e0eb2bb9-443c-40b2-b047-567e10cb384d-kube-api-access-8kkjv\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.265157 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.265176 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.366608 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.366644 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.366674 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kkjv\" (UniqueName: \"kubernetes.io/projected/e0eb2bb9-443c-40b2-b047-567e10cb384d-kube-api-access-8kkjv\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.366699 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.366718 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.372561 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.372603 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.372861 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.377225 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.383723 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kkjv\" (UniqueName: \"kubernetes.io/projected/e0eb2bb9-443c-40b2-b047-567e10cb384d-kube-api-access-8kkjv\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.452779 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.929448 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"]
Nov 25 17:20:09 crc kubenswrapper[4812]: W1125 17:20:09.935173 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0eb2bb9_443c_40b2_b047_567e10cb384d.slice/crio-abe9f23b7a7dfffd4d26f6ae55c62662aa84d5338b8df6d22c32790c089b97f0 WatchSource:0}: Error finding container abe9f23b7a7dfffd4d26f6ae55c62662aa84d5338b8df6d22c32790c089b97f0: Status 404 returned error can't find the container with id abe9f23b7a7dfffd4d26f6ae55c62662aa84d5338b8df6d22c32790c089b97f0
Nov 25 17:20:09 crc kubenswrapper[4812]: I1125 17:20:09.945773 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 17:20:10 crc kubenswrapper[4812]: I1125 17:20:10.734036 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh" event={"ID":"e0eb2bb9-443c-40b2-b047-567e10cb384d","Type":"ContainerStarted","Data":"abe9f23b7a7dfffd4d26f6ae55c62662aa84d5338b8df6d22c32790c089b97f0"}
Nov 25 17:20:11 crc kubenswrapper[4812]: I1125 17:20:11.743513 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh" event={"ID":"e0eb2bb9-443c-40b2-b047-567e10cb384d","Type":"ContainerStarted","Data":"b2b3b93f5809729146274c734f3f54c37fc4c8650c12b41691ed83dd957330b3"}
Nov 25 17:20:11 crc kubenswrapper[4812]: I1125 17:20:11.767988 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh" podStartSLOduration=2.211952436 podStartE2EDuration="2.767963018s" podCreationTimestamp="2025-11-25 17:20:09 +0000 UTC" firstStartedPulling="2025-11-25 17:20:09.945510901 +0000 UTC m=+1984.785652996" lastFinishedPulling="2025-11-25 17:20:10.501521483 +0000 UTC m=+1985.341663578" observedRunningTime="2025-11-25 17:20:11.755386639 +0000 UTC m=+1986.595528744" watchObservedRunningTime="2025-11-25 17:20:11.767963018 +0000 UTC m=+1986.608105123"
Nov 25 17:20:21 crc kubenswrapper[4812]: I1125 17:20:21.829841 4812 generic.go:334] "Generic (PLEG): container finished" podID="e0eb2bb9-443c-40b2-b047-567e10cb384d" containerID="b2b3b93f5809729146274c734f3f54c37fc4c8650c12b41691ed83dd957330b3" exitCode=0
Nov 25 17:20:21 crc kubenswrapper[4812]: I1125 17:20:21.829914 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh" event={"ID":"e0eb2bb9-443c-40b2-b047-567e10cb384d","Type":"ContainerDied","Data":"b2b3b93f5809729146274c734f3f54c37fc4c8650c12b41691ed83dd957330b3"}
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.232511 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.318884 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kkjv\" (UniqueName: \"kubernetes.io/projected/e0eb2bb9-443c-40b2-b047-567e10cb384d-kube-api-access-8kkjv\") pod \"e0eb2bb9-443c-40b2-b047-567e10cb384d\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") "
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.318979 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ceph\") pod \"e0eb2bb9-443c-40b2-b047-567e10cb384d\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") "
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.319073 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-inventory\") pod \"e0eb2bb9-443c-40b2-b047-567e10cb384d\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") "
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.319102 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-repo-setup-combined-ca-bundle\") pod \"e0eb2bb9-443c-40b2-b047-567e10cb384d\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") "
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.319170 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ssh-key\") pod \"e0eb2bb9-443c-40b2-b047-567e10cb384d\" (UID: \"e0eb2bb9-443c-40b2-b047-567e10cb384d\") "
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.325197 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "e0eb2bb9-443c-40b2-b047-567e10cb384d" (UID: "e0eb2bb9-443c-40b2-b047-567e10cb384d"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.325237 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0eb2bb9-443c-40b2-b047-567e10cb384d-kube-api-access-8kkjv" (OuterVolumeSpecName: "kube-api-access-8kkjv") pod "e0eb2bb9-443c-40b2-b047-567e10cb384d" (UID: "e0eb2bb9-443c-40b2-b047-567e10cb384d"). InnerVolumeSpecName "kube-api-access-8kkjv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.325367 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ceph" (OuterVolumeSpecName: "ceph") pod "e0eb2bb9-443c-40b2-b047-567e10cb384d" (UID: "e0eb2bb9-443c-40b2-b047-567e10cb384d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.346436 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-inventory" (OuterVolumeSpecName: "inventory") pod "e0eb2bb9-443c-40b2-b047-567e10cb384d" (UID: "e0eb2bb9-443c-40b2-b047-567e10cb384d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.378731 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e0eb2bb9-443c-40b2-b047-567e10cb384d" (UID: "e0eb2bb9-443c-40b2-b047-567e10cb384d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.421125 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.421387 4812 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.421450 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.421517 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kkjv\" (UniqueName: \"kubernetes.io/projected/e0eb2bb9-443c-40b2-b047-567e10cb384d-kube-api-access-8kkjv\") on node \"crc\" DevicePath \"\""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.421608 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e0eb2bb9-443c-40b2-b047-567e10cb384d-ceph\") on node \"crc\" DevicePath \"\""
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.857143 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh" event={"ID":"e0eb2bb9-443c-40b2-b047-567e10cb384d","Type":"ContainerDied","Data":"abe9f23b7a7dfffd4d26f6ae55c62662aa84d5338b8df6d22c32790c089b97f0"}
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.857686 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abe9f23b7a7dfffd4d26f6ae55c62662aa84d5338b8df6d22c32790c089b97f0"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.857803 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rs6lh"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.928454 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"]
Nov 25 17:20:23 crc kubenswrapper[4812]: E1125 17:20:23.928867 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0eb2bb9-443c-40b2-b047-567e10cb384d" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.928883 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0eb2bb9-443c-40b2-b047-567e10cb384d" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.929028 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0eb2bb9-443c-40b2-b047-567e10cb384d" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.929626 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.932923 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.932926 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.933077 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.933184 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.933221 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 17:20:23 crc kubenswrapper[4812]: I1125 17:20:23.947717 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"]
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.031040 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.031119 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.031144 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.031167 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsvjd\" (UniqueName: \"kubernetes.io/projected/2c364028-0d32-43b3-89d2-adea3a783a49-kube-api-access-qsvjd\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.031205 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.133480 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.133814 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.133915 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.133987 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsvjd\" (UniqueName: \"kubernetes.io/projected/2c364028-0d32-43b3-89d2-adea3a783a49-kube-api-access-qsvjd\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.134072 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.137724 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.138021 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.138545 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.139055 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.152483 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsvjd\" (UniqueName: \"kubernetes.io/projected/2c364028-0d32-43b3-89d2-adea3a783a49-kube-api-access-qsvjd\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.247938 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.736111 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"]
Nov 25 17:20:24 crc kubenswrapper[4812]: I1125 17:20:24.865754 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj" event={"ID":"2c364028-0d32-43b3-89d2-adea3a783a49","Type":"ContainerStarted","Data":"22875c7b6876505bef177d3117b3e007a3e66bbdd956c8fbc37fcb45439e88d2"}
Nov 25 17:20:25 crc kubenswrapper[4812]: I1125 17:20:25.884917 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj" event={"ID":"2c364028-0d32-43b3-89d2-adea3a783a49","Type":"ContainerStarted","Data":"e1f2820fa09adb6994b49a846bce24ee9418d9155e4f910ff0ea0c111ca26aeb"}
Nov 25 17:20:25 crc kubenswrapper[4812]: I1125 17:20:25.910692 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj" podStartSLOduration=2.515846419 podStartE2EDuration="2.910672067s" podCreationTimestamp="2025-11-25 17:20:23 +0000 UTC" firstStartedPulling="2025-11-25 17:20:24.740840691 +0000 UTC m=+1999.580982786" lastFinishedPulling="2025-11-25 17:20:25.135666339 +0000 UTC m=+1999.975808434" observedRunningTime="2025-11-25 17:20:25.903850663 +0000 UTC m=+2000.743992758" watchObservedRunningTime="2025-11-25 17:20:25.910672067 +0000 UTC m=+2000.750814172"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.534595 4812 scope.go:117] "RemoveContainer" containerID="bc775ca91f89af73440faa7a36a4895a30c998ec768624452c98d855eb3736ca"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.594828 4812 scope.go:117] "RemoveContainer" containerID="79f2cf22d092d9b8fe94e1f1c6756b1553e623a8650ca5b4b6f22aef7f1539e6"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.630320 4812 scope.go:117] "RemoveContainer" containerID="3d9833a6b5030da3335af7b4a4871113fe6616049684cda574a47844932dd9ed"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.663014 4812 scope.go:117] "RemoveContainer" containerID="c915e0f5460edce953cce2157147360fd24054c7dfe2f403e2c1981b99f099a8"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.726929 4812 scope.go:117] "RemoveContainer" containerID="ac39971a23a1d7dfe5e74f9a4d8477464e4067365d842a9ed93e1f1d75710261"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.768387 4812 scope.go:117] "RemoveContainer" containerID="ab79174a1b7656220f755fc746961c6e91846695342237af85e771b4f7cfd6cf"
Nov 25 17:21:00 crc kubenswrapper[4812]: I1125 17:21:00.804572 4812 scope.go:117] "RemoveContainer" containerID="a75b9c52212987054edd4d53fecd8822dd3185c37899ee6dfd76c3a40c8cd155"
Nov 25 17:21:27 crc kubenswrapper[4812]: I1125 17:21:27.333118 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:21:27 crc kubenswrapper[4812]: I1125 17:21:27.333792 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:21:57 crc kubenswrapper[4812]: I1125 17:21:57.333109 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:21:57 crc kubenswrapper[4812]: I1125 17:21:57.333653 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:22:00 crc kubenswrapper[4812]: I1125 17:22:00.723839 4812 generic.go:334] "Generic (PLEG): container finished" podID="2c364028-0d32-43b3-89d2-adea3a783a49" containerID="e1f2820fa09adb6994b49a846bce24ee9418d9155e4f910ff0ea0c111ca26aeb" exitCode=0
Nov 25 17:22:00 crc kubenswrapper[4812]: I1125 17:22:00.723916 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj" event={"ID":"2c364028-0d32-43b3-89d2-adea3a783a49","Type":"ContainerDied","Data":"e1f2820fa09adb6994b49a846bce24ee9418d9155e4f910ff0ea0c111ca26aeb"}
Nov 25 17:22:00 crc kubenswrapper[4812]: I1125 17:22:00.985514 4812 scope.go:117] "RemoveContainer" containerID="e8a939ace6426592fb9b95880fc998fc42af21d5d088da79b05fd1c461c6fbe8"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.142956 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.343893 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsvjd\" (UniqueName: \"kubernetes.io/projected/2c364028-0d32-43b3-89d2-adea3a783a49-kube-api-access-qsvjd\") pod \"2c364028-0d32-43b3-89d2-adea3a783a49\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") "
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.343959 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ssh-key\") pod \"2c364028-0d32-43b3-89d2-adea3a783a49\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") "
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.344003 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-bootstrap-combined-ca-bundle\") pod \"2c364028-0d32-43b3-89d2-adea3a783a49\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") "
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.344061 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-inventory\") pod \"2c364028-0d32-43b3-89d2-adea3a783a49\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") "
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.344118 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ceph\") pod \"2c364028-0d32-43b3-89d2-adea3a783a49\" (UID: \"2c364028-0d32-43b3-89d2-adea3a783a49\") "
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.353418 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c364028-0d32-43b3-89d2-adea3a783a49-kube-api-access-qsvjd" (OuterVolumeSpecName: "kube-api-access-qsvjd") pod "2c364028-0d32-43b3-89d2-adea3a783a49" (UID: "2c364028-0d32-43b3-89d2-adea3a783a49"). InnerVolumeSpecName "kube-api-access-qsvjd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.354113 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "2c364028-0d32-43b3-89d2-adea3a783a49" (UID: "2c364028-0d32-43b3-89d2-adea3a783a49"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.355465 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ceph" (OuterVolumeSpecName: "ceph") pod "2c364028-0d32-43b3-89d2-adea3a783a49" (UID: "2c364028-0d32-43b3-89d2-adea3a783a49"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.378497 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-inventory" (OuterVolumeSpecName: "inventory") pod "2c364028-0d32-43b3-89d2-adea3a783a49" (UID: "2c364028-0d32-43b3-89d2-adea3a783a49"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.378574 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2c364028-0d32-43b3-89d2-adea3a783a49" (UID: "2c364028-0d32-43b3-89d2-adea3a783a49"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.445928 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.445964 4812 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.446029 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.446560 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2c364028-0d32-43b3-89d2-adea3a783a49-ceph\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.446576 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsvjd\" (UniqueName: \"kubernetes.io/projected/2c364028-0d32-43b3-89d2-adea3a783a49-kube-api-access-qsvjd\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.742718 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj" event={"ID":"2c364028-0d32-43b3-89d2-adea3a783a49","Type":"ContainerDied","Data":"22875c7b6876505bef177d3117b3e007a3e66bbdd956c8fbc37fcb45439e88d2"}
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.742757 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22875c7b6876505bef177d3117b3e007a3e66bbdd956c8fbc37fcb45439e88d2"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.742806 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6l8qj"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.818034 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"]
Nov 25 17:22:02 crc kubenswrapper[4812]: E1125 17:22:02.822034 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c364028-0d32-43b3-89d2-adea3a783a49" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.822077 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c364028-0d32-43b3-89d2-adea3a783a49" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.822296 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c364028-0d32-43b3-89d2-adea3a783a49" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.822943 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.824855 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.825032 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.825312 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.825439 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.825906 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.831487 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"]
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.853756 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.853810 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.853908 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6hsf\" (UniqueName: \"kubernetes.io/projected/60d669de-f05f-443d-a3cb-74d5c319d7fe-kube-api-access-n6hsf\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.853938 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.955169 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6hsf\" (UniqueName: \"kubernetes.io/projected/60d669de-f05f-443d-a3cb-74d5c319d7fe-kube-api-access-n6hsf\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.955218 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.955318 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.955343 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.959262 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.959477 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.964647 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:02 crc kubenswrapper[4812]: I1125 17:22:02.984641 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6hsf\" (UniqueName: \"kubernetes.io/projected/60d669de-f05f-443d-a3cb-74d5c319d7fe-kube-api-access-n6hsf\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:03 crc kubenswrapper[4812]: I1125 17:22:03.140801 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"
Nov 25 17:22:03 crc kubenswrapper[4812]: I1125 17:22:03.707465 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn"]
Nov 25 17:22:03 crc kubenswrapper[4812]: I1125 17:22:03.751566 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" event={"ID":"60d669de-f05f-443d-a3cb-74d5c319d7fe","Type":"ContainerStarted","Data":"d6e5a8fdc28623f4e2ab2f102d962342a630a9344bdbd424fe1114d37cd894f3"}
Nov 25 17:22:04 crc kubenswrapper[4812]: I1125 17:22:04.766441 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" event={"ID":"60d669de-f05f-443d-a3cb-74d5c319d7fe","Type":"ContainerStarted","Data":"9d7b79a9aa057bd06c63de2587a0a18c38c84393bf1d467b5083d0b815737bc4"}
Nov 25 17:22:05 crc kubenswrapper[4812]: I1125 17:22:05.785152 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" podStartSLOduration=3.069259424 podStartE2EDuration="3.785131124s" podCreationTimestamp="2025-11-25 17:22:02 +0000 UTC" firstStartedPulling="2025-11-25 17:22:03.715842158 +0000 UTC m=+2098.555984253" lastFinishedPulling="2025-11-25 17:22:04.431713858 +0000 UTC m=+2099.271855953" observedRunningTime="2025-11-25 17:22:05.785127713 +0000 UTC m=+2100.625269808" watchObservedRunningTime="2025-11-25 17:22:05.785131124 +0000 UTC m=+2100.625273219"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.800238 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-np6ct"]
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.803883 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.817990 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-np6ct"]
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.822118 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-catalog-content\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.822255 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-utilities\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.822360 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtmzb\" (UniqueName: \"kubernetes.io/projected/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-kube-api-access-gtmzb\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.923498 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-utilities\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.923619 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtmzb\" (UniqueName: \"kubernetes.io/projected/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-kube-api-access-gtmzb\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.923707 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-catalog-content\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.924258 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-catalog-content\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.924464 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-utilities\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:11 crc kubenswrapper[4812]: I1125 17:22:11.941643 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtmzb\" (UniqueName: \"kubernetes.io/projected/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-kube-api-access-gtmzb\") pod \"redhat-marketplace-np6ct\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") " pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:12 crc kubenswrapper[4812]: I1125 17:22:12.130617 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:12 crc kubenswrapper[4812]: I1125 17:22:12.607009 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-np6ct"]
Nov 25 17:22:12 crc kubenswrapper[4812]: I1125 17:22:12.837026 4812 generic.go:334] "Generic (PLEG): container finished" podID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerID="319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e" exitCode=0
Nov 25 17:22:12 crc kubenswrapper[4812]: I1125 17:22:12.837076 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-np6ct" event={"ID":"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6","Type":"ContainerDied","Data":"319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e"}
Nov 25 17:22:12 crc kubenswrapper[4812]: I1125 17:22:12.837125 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-np6ct" event={"ID":"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6","Type":"ContainerStarted","Data":"178ddbbb2d1866074540c5a792060e13e8cbd06913c923f9d4116e1b36153d78"}
Nov 25 17:22:15 crc kubenswrapper[4812]: I1125 17:22:15.865388 4812 generic.go:334] "Generic (PLEG): container finished" podID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerID="9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116" exitCode=0
Nov 25 17:22:15 crc kubenswrapper[4812]: I1125 17:22:15.865917 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-np6ct" event={"ID":"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6","Type":"ContainerDied","Data":"9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116"}
Nov 25 17:22:16 crc kubenswrapper[4812]: I1125 17:22:16.876670 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-np6ct" event={"ID":"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6","Type":"ContainerStarted","Data":"e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4"}
Nov 25 17:22:16 crc kubenswrapper[4812]: I1125 17:22:16.896515 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-np6ct" podStartSLOduration=2.440465143 podStartE2EDuration="5.896499333s" podCreationTimestamp="2025-11-25 17:22:11 +0000 UTC" firstStartedPulling="2025-11-25 17:22:12.839174478 +0000 UTC m=+2107.679316573" lastFinishedPulling="2025-11-25 17:22:16.295208668 +0000 UTC m=+2111.135350763" observedRunningTime="2025-11-25 17:22:16.891333784 +0000 UTC m=+2111.731475879" watchObservedRunningTime="2025-11-25 17:22:16.896499333 +0000 UTC m=+2111.736641428"
Nov 25 17:22:22 crc kubenswrapper[4812]: I1125 17:22:22.132265 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:22 crc kubenswrapper[4812]: I1125 17:22:22.132654 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:22 crc kubenswrapper[4812]: I1125 17:22:22.179477 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:22 crc kubenswrapper[4812]: I1125 17:22:22.998126 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:23 crc kubenswrapper[4812]: I1125 17:22:23.043051 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-np6ct"]
Nov 25 17:22:24 crc kubenswrapper[4812]: I1125 17:22:24.965091 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-np6ct" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="registry-server" containerID="cri-o://e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4" gracePeriod=2
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.451459 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.572732 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-catalog-content\") pod \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") "
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.572791 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtmzb\" (UniqueName: \"kubernetes.io/projected/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-kube-api-access-gtmzb\") pod \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") "
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.572894 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-utilities\") pod \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\" (UID: \"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6\") "
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.574059 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-utilities" (OuterVolumeSpecName: "utilities") pod "3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" (UID: "3bc492fe-e212-4e8b-8efc-0e0956e2e9c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.578430 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-kube-api-access-gtmzb" (OuterVolumeSpecName: "kube-api-access-gtmzb") pod "3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" (UID: "3bc492fe-e212-4e8b-8efc-0e0956e2e9c6"). InnerVolumeSpecName "kube-api-access-gtmzb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.590805 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" (UID: "3bc492fe-e212-4e8b-8efc-0e0956e2e9c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.674911 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.674957 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtmzb\" (UniqueName: \"kubernetes.io/projected/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-kube-api-access-gtmzb\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.674971 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.977824 4812 generic.go:334] "Generic (PLEG): container finished" podID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerID="e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4" exitCode=0
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.977878 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-np6ct" event={"ID":"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6","Type":"ContainerDied","Data":"e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4"}
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.977910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-np6ct" event={"ID":"3bc492fe-e212-4e8b-8efc-0e0956e2e9c6","Type":"ContainerDied","Data":"178ddbbb2d1866074540c5a792060e13e8cbd06913c923f9d4116e1b36153d78"}
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.977929 4812 scope.go:117] "RemoveContainer" containerID="e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4"
Nov 25 17:22:25 crc kubenswrapper[4812]: I1125 17:22:25.978069 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-np6ct"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.007598 4812 scope.go:117] "RemoveContainer" containerID="9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.008553 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-np6ct"]
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.020226 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-np6ct"]
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.034481 4812 scope.go:117] "RemoveContainer" containerID="319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.083754 4812 scope.go:117] "RemoveContainer" containerID="e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4"
Nov 25 17:22:26 crc kubenswrapper[4812]: E1125 17:22:26.084636 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4\": container with ID starting with e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4 not found: ID does not exist" containerID="e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.084696 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4"} err="failed to get container status \"e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4\": rpc error: code = NotFound desc = could not find container \"e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4\": container with ID starting with e8578acbc84e151dd796649b50f87e724b23473ac0ffac979a18c5755d388ae4 not found: ID does not exist"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.084731 4812 scope.go:117] "RemoveContainer" containerID="9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116"
Nov 25 17:22:26 crc kubenswrapper[4812]: E1125 17:22:26.086037 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116\": container with ID starting with 9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116 not found: ID does not exist" containerID="9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.086077 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116"} err="failed to get container status \"9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116\": rpc error: code = NotFound desc = could not find container \"9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116\": container with ID starting with 9409c865b175fe3d63c6b33af247e1d7b9e8705de610ec82334cf0bc3c995116 not found: ID does not exist"
Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.086105 4812 scope.go:117] "RemoveContainer" containerID="319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e"
Nov 25 17:22:26 crc kubenswrapper[4812]: E1125 17:22:26.086444 4812 log.go:32] "ContainerStatus from runtime service
failed" err="rpc error: code = NotFound desc = could not find container \"319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e\": container with ID starting with 319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e not found: ID does not exist" containerID="319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e" Nov 25 17:22:26 crc kubenswrapper[4812]: I1125 17:22:26.086477 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e"} err="failed to get container status \"319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e\": rpc error: code = NotFound desc = could not find container \"319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e\": container with ID starting with 319bee1d69fb6361463fc0f1d6285cfebf124b4cfc35a65b86374a39767d872e not found: ID does not exist" Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.332867 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.332930 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.332980 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.333755 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1ce2565d170db8a46eee60e79be5aef9c670f9e94c93580922535f0b2f9ddf57"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.333828 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://1ce2565d170db8a46eee60e79be5aef9c670f9e94c93580922535f0b2f9ddf57" gracePeriod=600 Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.844089 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" path="/var/lib/kubelet/pods/3bc492fe-e212-4e8b-8efc-0e0956e2e9c6/volumes" Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.997759 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="1ce2565d170db8a46eee60e79be5aef9c670f9e94c93580922535f0b2f9ddf57" exitCode=0 Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.997814 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" 
event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"1ce2565d170db8a46eee60e79be5aef9c670f9e94c93580922535f0b2f9ddf57"} Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.997854 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af"} Nov 25 17:22:27 crc kubenswrapper[4812]: I1125 17:22:27.997875 4812 scope.go:117] "RemoveContainer" containerID="ae34a05aa38c6a99c86afe89c120c203f727ec066fc3aabb6dbd6d38ccbc4ae5" Nov 25 17:22:30 crc kubenswrapper[4812]: I1125 17:22:30.020643 4812 generic.go:334] "Generic (PLEG): container finished" podID="60d669de-f05f-443d-a3cb-74d5c319d7fe" containerID="9d7b79a9aa057bd06c63de2587a0a18c38c84393bf1d467b5083d0b815737bc4" exitCode=0 Nov 25 17:22:30 crc kubenswrapper[4812]: I1125 17:22:30.020990 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" event={"ID":"60d669de-f05f-443d-a3cb-74d5c319d7fe","Type":"ContainerDied","Data":"9d7b79a9aa057bd06c63de2587a0a18c38c84393bf1d467b5083d0b815737bc4"} Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.625091 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.790658 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6hsf\" (UniqueName: \"kubernetes.io/projected/60d669de-f05f-443d-a3cb-74d5c319d7fe-kube-api-access-n6hsf\") pod \"60d669de-f05f-443d-a3cb-74d5c319d7fe\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.790717 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ssh-key\") pod \"60d669de-f05f-443d-a3cb-74d5c319d7fe\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.790818 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ceph\") pod \"60d669de-f05f-443d-a3cb-74d5c319d7fe\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.790898 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-inventory\") pod \"60d669de-f05f-443d-a3cb-74d5c319d7fe\" (UID: \"60d669de-f05f-443d-a3cb-74d5c319d7fe\") " Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.797173 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ceph" (OuterVolumeSpecName: "ceph") pod "60d669de-f05f-443d-a3cb-74d5c319d7fe" (UID: "60d669de-f05f-443d-a3cb-74d5c319d7fe"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.797757 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60d669de-f05f-443d-a3cb-74d5c319d7fe-kube-api-access-n6hsf" (OuterVolumeSpecName: "kube-api-access-n6hsf") pod "60d669de-f05f-443d-a3cb-74d5c319d7fe" (UID: "60d669de-f05f-443d-a3cb-74d5c319d7fe"). InnerVolumeSpecName "kube-api-access-n6hsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.819988 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "60d669de-f05f-443d-a3cb-74d5c319d7fe" (UID: "60d669de-f05f-443d-a3cb-74d5c319d7fe"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.839732 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-inventory" (OuterVolumeSpecName: "inventory") pod "60d669de-f05f-443d-a3cb-74d5c319d7fe" (UID: "60d669de-f05f-443d-a3cb-74d5c319d7fe"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.893803 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6hsf\" (UniqueName: \"kubernetes.io/projected/60d669de-f05f-443d-a3cb-74d5c319d7fe-kube-api-access-n6hsf\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.893835 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.893845 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:31 crc kubenswrapper[4812]: I1125 17:22:31.893854 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/60d669de-f05f-443d-a3cb-74d5c319d7fe-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.056440 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" event={"ID":"60d669de-f05f-443d-a3cb-74d5c319d7fe","Type":"ContainerDied","Data":"d6e5a8fdc28623f4e2ab2f102d962342a630a9344bdbd424fe1114d37cd894f3"} Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.056717 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6e5a8fdc28623f4e2ab2f102d962342a630a9344bdbd424fe1114d37cd894f3" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.056512 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-mv6jn" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.137209 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr"] Nov 25 17:22:32 crc kubenswrapper[4812]: E1125 17:22:32.137840 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="registry-server" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.137857 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="registry-server" Nov 25 17:22:32 crc kubenswrapper[4812]: E1125 17:22:32.137880 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="extract-content" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.137886 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="extract-content" Nov 25 17:22:32 crc kubenswrapper[4812]: E1125 17:22:32.137896 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60d669de-f05f-443d-a3cb-74d5c319d7fe" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.137904 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="60d669de-f05f-443d-a3cb-74d5c319d7fe" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:22:32 crc kubenswrapper[4812]: E1125 17:22:32.137931 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="extract-utilities" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.137938 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="extract-utilities" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.138123 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="60d669de-f05f-443d-a3cb-74d5c319d7fe" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.138144 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bc492fe-e212-4e8b-8efc-0e0956e2e9c6" containerName="registry-server" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.138891 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.147254 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.147780 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.148900 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.149860 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.149941 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr"] Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.150028 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.302356 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkhxn\" (UniqueName: \"kubernetes.io/projected/de539db0-743b-46a6-bd4a-249c0fbd65f8-kube-api-access-jkhxn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.302415 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.302442 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.302584 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.405118 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkhxn\" (UniqueName: \"kubernetes.io/projected/de539db0-743b-46a6-bd4a-249c0fbd65f8-kube-api-access-jkhxn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.405185 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.405218 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.405253 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.409737 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.411051 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.412176 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.424671 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkhxn\" (UniqueName: \"kubernetes.io/projected/de539db0-743b-46a6-bd4a-249c0fbd65f8-kube-api-access-jkhxn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.469890 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:32 crc kubenswrapper[4812]: I1125 17:22:32.991523 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr"] Nov 25 17:22:33 crc kubenswrapper[4812]: I1125 17:22:33.064632 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" event={"ID":"de539db0-743b-46a6-bd4a-249c0fbd65f8","Type":"ContainerStarted","Data":"9675940de06aacd9414d59865a375526e3ef078c82cb9b8edab2d02f63211629"} Nov 25 17:22:34 crc kubenswrapper[4812]: I1125 17:22:34.090810 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" event={"ID":"de539db0-743b-46a6-bd4a-249c0fbd65f8","Type":"ContainerStarted","Data":"522beea37d3dc1aa3cd583375f81c2ed5380e6733d9d4501d6d0930aa264e249"} Nov 25 17:22:34 crc kubenswrapper[4812]: I1125 17:22:34.135361 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" podStartSLOduration=1.729132677 podStartE2EDuration="2.135344131s" podCreationTimestamp="2025-11-25 17:22:32 +0000 UTC" firstStartedPulling="2025-11-25 17:22:33.008835816 +0000 UTC m=+2127.848977901" lastFinishedPulling="2025-11-25 17:22:33.41504726 +0000 UTC m=+2128.255189355" observedRunningTime="2025-11-25 17:22:34.129983166 +0000 UTC m=+2128.970125261" watchObservedRunningTime="2025-11-25 17:22:34.135344131 +0000 UTC m=+2128.975486226" Nov 25 17:22:39 crc kubenswrapper[4812]: I1125 17:22:39.145008 4812 generic.go:334] "Generic (PLEG): container finished" podID="de539db0-743b-46a6-bd4a-249c0fbd65f8" containerID="522beea37d3dc1aa3cd583375f81c2ed5380e6733d9d4501d6d0930aa264e249" exitCode=0 Nov 25 17:22:39 crc kubenswrapper[4812]: I1125 17:22:39.145210 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" event={"ID":"de539db0-743b-46a6-bd4a-249c0fbd65f8","Type":"ContainerDied","Data":"522beea37d3dc1aa3cd583375f81c2ed5380e6733d9d4501d6d0930aa264e249"} Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.616873 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.757478 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ceph\") pod \"de539db0-743b-46a6-bd4a-249c0fbd65f8\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.757662 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkhxn\" (UniqueName: \"kubernetes.io/projected/de539db0-743b-46a6-bd4a-249c0fbd65f8-kube-api-access-jkhxn\") pod \"de539db0-743b-46a6-bd4a-249c0fbd65f8\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.757711 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ssh-key\") pod \"de539db0-743b-46a6-bd4a-249c0fbd65f8\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.757805 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-inventory\") pod \"de539db0-743b-46a6-bd4a-249c0fbd65f8\" (UID: \"de539db0-743b-46a6-bd4a-249c0fbd65f8\") " Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.763130 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ceph" (OuterVolumeSpecName: "ceph") pod "de539db0-743b-46a6-bd4a-249c0fbd65f8" (UID: "de539db0-743b-46a6-bd4a-249c0fbd65f8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.763362 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de539db0-743b-46a6-bd4a-249c0fbd65f8-kube-api-access-jkhxn" (OuterVolumeSpecName: "kube-api-access-jkhxn") pod "de539db0-743b-46a6-bd4a-249c0fbd65f8" (UID: "de539db0-743b-46a6-bd4a-249c0fbd65f8"). InnerVolumeSpecName "kube-api-access-jkhxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.786659 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-inventory" (OuterVolumeSpecName: "inventory") pod "de539db0-743b-46a6-bd4a-249c0fbd65f8" (UID: "de539db0-743b-46a6-bd4a-249c0fbd65f8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.787734 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "de539db0-743b-46a6-bd4a-249c0fbd65f8" (UID: "de539db0-743b-46a6-bd4a-249c0fbd65f8"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.859805 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.859850 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkhxn\" (UniqueName: \"kubernetes.io/projected/de539db0-743b-46a6-bd4a-249c0fbd65f8-kube-api-access-jkhxn\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.859862 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:40 crc kubenswrapper[4812]: I1125 17:22:40.859873 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/de539db0-743b-46a6-bd4a-249c0fbd65f8-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.163181 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" event={"ID":"de539db0-743b-46a6-bd4a-249c0fbd65f8","Type":"ContainerDied","Data":"9675940de06aacd9414d59865a375526e3ef078c82cb9b8edab2d02f63211629"} Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.163754 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9675940de06aacd9414d59865a375526e3ef078c82cb9b8edab2d02f63211629" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.163229 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-6k5qr" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.247170 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v"] Nov 25 17:22:41 crc kubenswrapper[4812]: E1125 17:22:41.247590 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de539db0-743b-46a6-bd4a-249c0fbd65f8" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.247609 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="de539db0-743b-46a6-bd4a-249c0fbd65f8" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.247789 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="de539db0-743b-46a6-bd4a-249c0fbd65f8" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.248363 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.253400 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.253702 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.253407 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.254125 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.254169 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.259771 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v"] Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.369301 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.369337 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.369359 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.369437 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jfh6\" (UniqueName: \"kubernetes.io/projected/77d4dbc4-1648-4699-91e8-c0e0526da608-kube-api-access-2jfh6\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.470877 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.470944 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.470970 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.471033 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jfh6\" (UniqueName: \"kubernetes.io/projected/77d4dbc4-1648-4699-91e8-c0e0526da608-kube-api-access-2jfh6\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.479294 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.482397 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.489552 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.492971 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jfh6\" (UniqueName: \"kubernetes.io/projected/77d4dbc4-1648-4699-91e8-c0e0526da608-kube-api-access-2jfh6\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-f2s6v\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:41 crc kubenswrapper[4812]: I1125 17:22:41.601749 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:22:42 crc kubenswrapper[4812]: I1125 17:22:42.124102 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v"] Nov 25 17:22:42 crc kubenswrapper[4812]: I1125 17:22:42.171492 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" event={"ID":"77d4dbc4-1648-4699-91e8-c0e0526da608","Type":"ContainerStarted","Data":"185e82125b99fbfdd6d74e556dcc9c2372d0d8d6a0ebc735b86e5302ff4c2055"} Nov 25 17:22:43 crc kubenswrapper[4812]: I1125 17:22:43.183450 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" event={"ID":"77d4dbc4-1648-4699-91e8-c0e0526da608","Type":"ContainerStarted","Data":"05686213f031ee0c667c88e0ce0acf77e201d277042a3d00516d5a03c6f9c46c"} Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.711354 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" podStartSLOduration=6.254351441 podStartE2EDuration="6.711338838s" podCreationTimestamp="2025-11-25 17:22:41 +0000 UTC" firstStartedPulling="2025-11-25 17:22:42.128397086 +0000 UTC m=+2136.968539191" lastFinishedPulling="2025-11-25 17:22:42.585384483 +0000 UTC m=+2137.425526588" observedRunningTime="2025-11-25 17:22:43.200866301 +0000 UTC m=+2138.041008396" watchObservedRunningTime="2025-11-25 17:22:47.711338838 +0000 UTC m=+2142.551480933" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.712584 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zvsmw"] Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.714415 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.730237 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zvsmw"] Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.779154 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gksff\" (UniqueName: \"kubernetes.io/projected/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-kube-api-access-gksff\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.779630 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-catalog-content\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.779684 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-utilities\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.881915 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gksff\" (UniqueName: \"kubernetes.io/projected/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-kube-api-access-gksff\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.882100 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-catalog-content\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.882180 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-utilities\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.883032 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-utilities\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.883561 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-catalog-content\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:47 crc kubenswrapper[4812]: I1125 17:22:47.902686 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gksff\" (UniqueName: \"kubernetes.io/projected/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-kube-api-access-gksff\") pod \"certified-operators-zvsmw\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:48 crc kubenswrapper[4812]: I1125 17:22:48.039356 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:48 crc kubenswrapper[4812]: I1125 17:22:48.534391 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zvsmw"] Nov 25 17:22:49 crc kubenswrapper[4812]: I1125 17:22:49.239851 4812 generic.go:334] "Generic (PLEG): container finished" podID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerID="2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4" exitCode=0 Nov 25 17:22:49 crc kubenswrapper[4812]: I1125 17:22:49.240038 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvsmw" event={"ID":"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0","Type":"ContainerDied","Data":"2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4"} Nov 25 17:22:49 crc kubenswrapper[4812]: I1125 17:22:49.240167 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvsmw" event={"ID":"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0","Type":"ContainerStarted","Data":"aab234a74310b0e3751710b4c4b629c8e6267aa55c9b7075204ba88ea2a95d53"} Nov 25 17:22:50 crc kubenswrapper[4812]: I1125 17:22:50.252604 4812 generic.go:334] "Generic (PLEG): container finished" podID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerID="1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2" exitCode=0 Nov 25 17:22:50 crc kubenswrapper[4812]: I1125 17:22:50.252692 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvsmw" event={"ID":"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0","Type":"ContainerDied","Data":"1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2"} Nov 25 17:22:51 crc kubenswrapper[4812]: I1125 17:22:51.263451 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvsmw" event={"ID":"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0","Type":"ContainerStarted","Data":"4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3"} Nov 25 17:22:51 crc kubenswrapper[4812]: I1125 17:22:51.291467 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zvsmw" podStartSLOduration=2.858076346 podStartE2EDuration="4.291444921s" podCreationTimestamp="2025-11-25 17:22:47 +0000 UTC" firstStartedPulling="2025-11-25 17:22:49.24195635 +0000 UTC m=+2144.082098445" lastFinishedPulling="2025-11-25 17:22:50.675324925 +0000 UTC m=+2145.515467020" observedRunningTime="2025-11-25 17:22:51.28290638 +0000 UTC m=+2146.123048475" watchObservedRunningTime="2025-11-25 17:22:51.291444921 +0000 UTC m=+2146.131587016" Nov 25 17:22:58 crc kubenswrapper[4812]: I1125 17:22:58.040473 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:58 crc kubenswrapper[4812]: I1125 17:22:58.041088 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:58 crc kubenswrapper[4812]: I1125 17:22:58.082511 4812 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:58 crc kubenswrapper[4812]: I1125 17:22:58.376244 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:22:58 crc kubenswrapper[4812]: I1125 17:22:58.426942 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zvsmw"] Nov 25 17:23:00 crc kubenswrapper[4812]: I1125 17:23:00.342632 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zvsmw" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="registry-server" containerID="cri-o://4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3" gracePeriod=2 Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.049313 4812 scope.go:117] "RemoveContainer" containerID="5f59b7f13edac07b4be13a573a40afaa4b0c6f16fd798ba9b3c95d95c185aa2f" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.082500 4812 scope.go:117] "RemoveContainer" containerID="01cb517fafac99171c64e593506726af83c10df3bb73c3aa6f42fd4bd97aa78d" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.315296 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.352599 4812 generic.go:334] "Generic (PLEG): container finished" podID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerID="4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3" exitCode=0 Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.352638 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvsmw" event={"ID":"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0","Type":"ContainerDied","Data":"4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3"} Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.352670 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zvsmw" event={"ID":"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0","Type":"ContainerDied","Data":"aab234a74310b0e3751710b4c4b629c8e6267aa55c9b7075204ba88ea2a95d53"} Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.352691 4812 scope.go:117] "RemoveContainer" containerID="4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.352693 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zvsmw" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.374277 4812 scope.go:117] "RemoveContainer" containerID="1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.395506 4812 scope.go:117] "RemoveContainer" containerID="2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.411660 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-catalog-content\") pod \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.411781 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-utilities\") pod \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.411912 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gksff\" (UniqueName: \"kubernetes.io/projected/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-kube-api-access-gksff\") pod \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\" (UID: \"3a3064cc-bae3-47eb-a4ad-63fe2f938cc0\") " Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.413001 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-utilities" (OuterVolumeSpecName: "utilities") pod "3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" (UID: "3a3064cc-bae3-47eb-a4ad-63fe2f938cc0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.416205 4812 scope.go:117] "RemoveContainer" containerID="4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3" Nov 25 17:23:01 crc kubenswrapper[4812]: E1125 17:23:01.417297 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3\": container with ID starting with 4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3 not found: ID does not exist" containerID="4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.417517 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3"} err="failed to get container status \"4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3\": rpc error: code = NotFound desc = could not find container \"4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3\": container with ID starting with 4d38b289b31e9d380592a39139ade2248f36701cf6b71778cc7e0e4bb58963c3 not found: ID does not exist" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.417558 4812 scope.go:117] "RemoveContainer" containerID="1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.418271 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-kube-api-access-gksff" (OuterVolumeSpecName: "kube-api-access-gksff") pod "3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" (UID: "3a3064cc-bae3-47eb-a4ad-63fe2f938cc0"). InnerVolumeSpecName "kube-api-access-gksff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:23:01 crc kubenswrapper[4812]: E1125 17:23:01.418754 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2\": container with ID starting with 1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2 not found: ID does not exist" containerID="1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.418821 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2"} err="failed to get container status \"1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2\": rpc error: code = NotFound desc = could not find container \"1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2\": container with ID starting with 1a07b9fc8fc2b4f1d06b1156a43968a2da39d1303efaaae1e6d280b8fb0947b2 not found: ID does not exist" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.418853 4812 scope.go:117] "RemoveContainer" containerID="2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4" Nov 25 17:23:01 crc kubenswrapper[4812]: E1125 17:23:01.421495 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4\": container with ID starting with 2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4 not found: ID does not exist" containerID="2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.421562 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4"} err="failed to get container status \"2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4\": rpc error: code = NotFound desc = could not find container \"2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4\": container with ID starting with 2ebec0552101c4043d2656f789db26a5642879f4cc5603bd58d6c343c6ad93b4 not found: ID does not exist" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.458238 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" (UID: "3a3064cc-bae3-47eb-a4ad-63fe2f938cc0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.514170 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gksff\" (UniqueName: \"kubernetes.io/projected/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-kube-api-access-gksff\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.514199 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.514209 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.685171 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zvsmw"] Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.692268 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zvsmw"] Nov 25 17:23:01 crc kubenswrapper[4812]: I1125 17:23:01.840050 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" path="/var/lib/kubelet/pods/3a3064cc-bae3-47eb-a4ad-63fe2f938cc0/volumes" Nov 25 17:23:19 crc kubenswrapper[4812]: I1125 17:23:19.518103 4812 generic.go:334] "Generic (PLEG): container finished" podID="77d4dbc4-1648-4699-91e8-c0e0526da608" containerID="05686213f031ee0c667c88e0ce0acf77e201d277042a3d00516d5a03c6f9c46c" exitCode=0 Nov 25 17:23:19 crc kubenswrapper[4812]: I1125 17:23:19.518185 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" event={"ID":"77d4dbc4-1648-4699-91e8-c0e0526da608","Type":"ContainerDied","Data":"05686213f031ee0c667c88e0ce0acf77e201d277042a3d00516d5a03c6f9c46c"} Nov 25 17:23:20 crc kubenswrapper[4812]: I1125 17:23:20.898679 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.058744 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ssh-key\") pod \"77d4dbc4-1648-4699-91e8-c0e0526da608\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.058827 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ceph\") pod \"77d4dbc4-1648-4699-91e8-c0e0526da608\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.058910 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jfh6\" (UniqueName: \"kubernetes.io/projected/77d4dbc4-1648-4699-91e8-c0e0526da608-kube-api-access-2jfh6\") pod \"77d4dbc4-1648-4699-91e8-c0e0526da608\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.059034 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-inventory\") pod \"77d4dbc4-1648-4699-91e8-c0e0526da608\" (UID: \"77d4dbc4-1648-4699-91e8-c0e0526da608\") " Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.066115 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d4dbc4-1648-4699-91e8-c0e0526da608-kube-api-access-2jfh6" (OuterVolumeSpecName: "kube-api-access-2jfh6") pod "77d4dbc4-1648-4699-91e8-c0e0526da608" (UID: "77d4dbc4-1648-4699-91e8-c0e0526da608"). InnerVolumeSpecName "kube-api-access-2jfh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.067749 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ceph" (OuterVolumeSpecName: "ceph") pod "77d4dbc4-1648-4699-91e8-c0e0526da608" (UID: "77d4dbc4-1648-4699-91e8-c0e0526da608"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.089192 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-inventory" (OuterVolumeSpecName: "inventory") pod "77d4dbc4-1648-4699-91e8-c0e0526da608" (UID: "77d4dbc4-1648-4699-91e8-c0e0526da608"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.095765 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "77d4dbc4-1648-4699-91e8-c0e0526da608" (UID: "77d4dbc4-1648-4699-91e8-c0e0526da608"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.160499 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.160560 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.160571 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/77d4dbc4-1648-4699-91e8-c0e0526da608-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.160583 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jfh6\" (UniqueName: \"kubernetes.io/projected/77d4dbc4-1648-4699-91e8-c0e0526da608-kube-api-access-2jfh6\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.541087 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" event={"ID":"77d4dbc4-1648-4699-91e8-c0e0526da608","Type":"ContainerDied","Data":"185e82125b99fbfdd6d74e556dcc9c2372d0d8d6a0ebc735b86e5302ff4c2055"} Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.541135 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="185e82125b99fbfdd6d74e556dcc9c2372d0d8d6a0ebc735b86e5302ff4c2055" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.541200 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-f2s6v" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616013 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7"] Nov 25 17:23:21 crc kubenswrapper[4812]: E1125 17:23:21.616351 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="registry-server" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616363 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="registry-server" Nov 25 17:23:21 crc kubenswrapper[4812]: E1125 17:23:21.616390 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="extract-utilities" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616396 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="extract-utilities" Nov 25 17:23:21 crc kubenswrapper[4812]: E1125 17:23:21.616405 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="extract-content" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616411 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="extract-content" Nov 25 17:23:21 crc kubenswrapper[4812]: E1125 17:23:21.616436 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d4dbc4-1648-4699-91e8-c0e0526da608" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616443 4812 
state_mem.go:107] "Deleted CPUSet assignment" podUID="77d4dbc4-1648-4699-91e8-c0e0526da608" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616615 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d4dbc4-1648-4699-91e8-c0e0526da608" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.616635 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a3064cc-bae3-47eb-a4ad-63fe2f938cc0" containerName="registry-server" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.617177 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.620644 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.621112 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.621266 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.622699 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.623550 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.639492 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7"] Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.770376 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.771005 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxbst\" (UniqueName: \"kubernetes.io/projected/5ab47d19-e64f-414b-99ce-314ee117f247-kube-api-access-kxbst\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.771207 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.771569 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" 
(UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.872609 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.872751 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.872790 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.872816 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxbst\" (UniqueName: \"kubernetes.io/projected/5ab47d19-e64f-414b-99ce-314ee117f247-kube-api-access-kxbst\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.876931 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.877798 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.877902 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 crc kubenswrapper[4812]: I1125 17:23:21.893918 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxbst\" (UniqueName: \"kubernetes.io/projected/5ab47d19-e64f-414b-99ce-314ee117f247-kube-api-access-kxbst\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:21 
crc kubenswrapper[4812]: I1125 17:23:21.955940 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:22 crc kubenswrapper[4812]: I1125 17:23:22.490697 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7"] Nov 25 17:23:22 crc kubenswrapper[4812]: I1125 17:23:22.550967 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" event={"ID":"5ab47d19-e64f-414b-99ce-314ee117f247","Type":"ContainerStarted","Data":"d156457cab2c73aa61cb6466eca2c45d64f113c3e250f8afbd90a9a6910c416b"} Nov 25 17:23:23 crc kubenswrapper[4812]: I1125 17:23:23.560623 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" event={"ID":"5ab47d19-e64f-414b-99ce-314ee117f247","Type":"ContainerStarted","Data":"ce9a55c739c7cbf363e6c34476d2daa294bc1f87f276aeaf5736e9e04d3783f4"} Nov 25 17:23:23 crc kubenswrapper[4812]: I1125 17:23:23.585984 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" podStartSLOduration=2.134932437 podStartE2EDuration="2.585957532s" podCreationTimestamp="2025-11-25 17:23:21 +0000 UTC" firstStartedPulling="2025-11-25 17:23:22.498132493 +0000 UTC m=+2177.338274588" lastFinishedPulling="2025-11-25 17:23:22.949157578 +0000 UTC m=+2177.789299683" observedRunningTime="2025-11-25 17:23:23.578006237 +0000 UTC m=+2178.418148342" watchObservedRunningTime="2025-11-25 17:23:23.585957532 +0000 UTC m=+2178.426099627" Nov 25 17:23:27 crc kubenswrapper[4812]: I1125 17:23:27.592763 4812 generic.go:334] "Generic (PLEG): container finished" podID="5ab47d19-e64f-414b-99ce-314ee117f247" containerID="ce9a55c739c7cbf363e6c34476d2daa294bc1f87f276aeaf5736e9e04d3783f4" exitCode=0 Nov 25 17:23:27 crc kubenswrapper[4812]: I1125 17:23:27.592832 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" event={"ID":"5ab47d19-e64f-414b-99ce-314ee117f247","Type":"ContainerDied","Data":"ce9a55c739c7cbf363e6c34476d2daa294bc1f87f276aeaf5736e9e04d3783f4"} Nov 25 17:23:28 crc kubenswrapper[4812]: I1125 17:23:28.999127 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.009625 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ceph\") pod \"5ab47d19-e64f-414b-99ce-314ee117f247\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.009693 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ssh-key\") pod \"5ab47d19-e64f-414b-99ce-314ee117f247\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.009735 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxbst\" (UniqueName: \"kubernetes.io/projected/5ab47d19-e64f-414b-99ce-314ee117f247-kube-api-access-kxbst\") pod \"5ab47d19-e64f-414b-99ce-314ee117f247\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.009820 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-inventory\") pod \"5ab47d19-e64f-414b-99ce-314ee117f247\" (UID: \"5ab47d19-e64f-414b-99ce-314ee117f247\") " Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.015741 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ceph" (OuterVolumeSpecName: "ceph") pod "5ab47d19-e64f-414b-99ce-314ee117f247" (UID: "5ab47d19-e64f-414b-99ce-314ee117f247"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.017146 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ab47d19-e64f-414b-99ce-314ee117f247-kube-api-access-kxbst" (OuterVolumeSpecName: "kube-api-access-kxbst") pod "5ab47d19-e64f-414b-99ce-314ee117f247" (UID: "5ab47d19-e64f-414b-99ce-314ee117f247"). InnerVolumeSpecName "kube-api-access-kxbst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.037770 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-inventory" (OuterVolumeSpecName: "inventory") pod "5ab47d19-e64f-414b-99ce-314ee117f247" (UID: "5ab47d19-e64f-414b-99ce-314ee117f247"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.038148 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5ab47d19-e64f-414b-99ce-314ee117f247" (UID: "5ab47d19-e64f-414b-99ce-314ee117f247"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.111663 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.111688 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.111696 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5ab47d19-e64f-414b-99ce-314ee117f247-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.111706 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxbst\" (UniqueName: \"kubernetes.io/projected/5ab47d19-e64f-414b-99ce-314ee117f247-kube-api-access-kxbst\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.611782 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" event={"ID":"5ab47d19-e64f-414b-99ce-314ee117f247","Type":"ContainerDied","Data":"d156457cab2c73aa61cb6466eca2c45d64f113c3e250f8afbd90a9a6910c416b"} Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.611844 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d156457cab2c73aa61cb6466eca2c45d64f113c3e250f8afbd90a9a6910c416b" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.611871 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-8nhn7" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.676687 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj"] Nov 25 17:23:29 crc kubenswrapper[4812]: E1125 17:23:29.677116 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ab47d19-e64f-414b-99ce-314ee117f247" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.677139 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ab47d19-e64f-414b-99ce-314ee117f247" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.677348 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ab47d19-e64f-414b-99ce-314ee117f247" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.678067 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.679885 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.680085 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.680683 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.681053 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.685839 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj"] Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.687519 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.821029 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.821095 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpf2c\" (UniqueName: \"kubernetes.io/projected/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-kube-api-access-tpf2c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.821153 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.821190 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.922876 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.923233 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-tpf2c\" (UniqueName: \"kubernetes.io/projected/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-kube-api-access-tpf2c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.923289 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.923325 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.928200 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.928218 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.928674 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.945091 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpf2c\" (UniqueName: \"kubernetes.io/projected/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-kube-api-access-tpf2c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:29 crc kubenswrapper[4812]: I1125 17:23:29.997870 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.486314 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj"] Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.621956 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5m82l"] Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.624252 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" event={"ID":"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2","Type":"ContainerStarted","Data":"a78776cf914637fd680f8af142dbb1e41d614664babc8e4a94eb520c14f60c72"} Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.624398 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.636378 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw8k7\" (UniqueName: \"kubernetes.io/projected/e05cbfa5-996d-48c8-999d-2ed43387748d-kube-api-access-lw8k7\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.636610 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-utilities\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.636706 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-catalog-content\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.653277 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5m82l"] Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.737865 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw8k7\" (UniqueName: \"kubernetes.io/projected/e05cbfa5-996d-48c8-999d-2ed43387748d-kube-api-access-lw8k7\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.737977 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-utilities\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.738022 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-catalog-content\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " 
pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.738582 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-utilities\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.738604 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-catalog-content\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.762837 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw8k7\" (UniqueName: \"kubernetes.io/projected/e05cbfa5-996d-48c8-999d-2ed43387748d-kube-api-access-lw8k7\") pod \"redhat-operators-5m82l\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:30 crc kubenswrapper[4812]: I1125 17:23:30.948167 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:31 crc kubenswrapper[4812]: I1125 17:23:31.481461 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5m82l"] Nov 25 17:23:31 crc kubenswrapper[4812]: I1125 17:23:31.632910 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m82l" event={"ID":"e05cbfa5-996d-48c8-999d-2ed43387748d","Type":"ContainerStarted","Data":"ced6f9ede7396be462efdeb68ef53b3697cf25d61c22aa7e66ca08051e926533"} Nov 25 17:23:31 crc kubenswrapper[4812]: I1125 17:23:31.634556 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" event={"ID":"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2","Type":"ContainerStarted","Data":"a67b61cbedc115681f77976a0e5a81af540199afa2cfa4c9239faf77c5eb29e0"} Nov 25 17:23:31 crc kubenswrapper[4812]: I1125 17:23:31.658102 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" podStartSLOduration=2.167640992 podStartE2EDuration="2.658085892s" podCreationTimestamp="2025-11-25 17:23:29 +0000 UTC" firstStartedPulling="2025-11-25 17:23:30.492739768 +0000 UTC m=+2185.332881863" lastFinishedPulling="2025-11-25 17:23:30.983184668 +0000 UTC m=+2185.823326763" observedRunningTime="2025-11-25 17:23:31.650311402 +0000 UTC m=+2186.490453497" watchObservedRunningTime="2025-11-25 17:23:31.658085892 +0000 UTC m=+2186.498227987" Nov 25 17:23:32 crc kubenswrapper[4812]: I1125 17:23:32.645481 4812 generic.go:334] "Generic (PLEG): container finished" podID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerID="942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925" exitCode=0 Nov 25 17:23:32 crc kubenswrapper[4812]: I1125 17:23:32.645657 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m82l" event={"ID":"e05cbfa5-996d-48c8-999d-2ed43387748d","Type":"ContainerDied","Data":"942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925"} Nov 25 17:23:34 crc kubenswrapper[4812]: I1125 17:23:34.661481 4812 generic.go:334] "Generic (PLEG): 
container finished" podID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerID="6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248" exitCode=0 Nov 25 17:23:34 crc kubenswrapper[4812]: I1125 17:23:34.661585 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m82l" event={"ID":"e05cbfa5-996d-48c8-999d-2ed43387748d","Type":"ContainerDied","Data":"6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248"} Nov 25 17:23:35 crc kubenswrapper[4812]: I1125 17:23:35.681736 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m82l" event={"ID":"e05cbfa5-996d-48c8-999d-2ed43387748d","Type":"ContainerStarted","Data":"79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58"} Nov 25 17:23:35 crc kubenswrapper[4812]: I1125 17:23:35.703214 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5m82l" podStartSLOduration=3.266118415 podStartE2EDuration="5.703195397s" podCreationTimestamp="2025-11-25 17:23:30 +0000 UTC" firstStartedPulling="2025-11-25 17:23:32.64814439 +0000 UTC m=+2187.488286485" lastFinishedPulling="2025-11-25 17:23:35.085221382 +0000 UTC m=+2189.925363467" observedRunningTime="2025-11-25 17:23:35.696755574 +0000 UTC m=+2190.536897689" watchObservedRunningTime="2025-11-25 17:23:35.703195397 +0000 UTC m=+2190.543337492" Nov 25 17:23:40 crc kubenswrapper[4812]: I1125 17:23:40.950416 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:40 crc kubenswrapper[4812]: I1125 17:23:40.951058 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:40 crc kubenswrapper[4812]: I1125 17:23:40.995051 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:41 crc kubenswrapper[4812]: I1125 17:23:41.777102 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:41 crc kubenswrapper[4812]: I1125 17:23:41.830347 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5m82l"] Nov 25 17:23:43 crc kubenswrapper[4812]: I1125 17:23:43.745048 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5m82l" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="registry-server" containerID="cri-o://79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58" gracePeriod=2 Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.183708 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.386515 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw8k7\" (UniqueName: \"kubernetes.io/projected/e05cbfa5-996d-48c8-999d-2ed43387748d-kube-api-access-lw8k7\") pod \"e05cbfa5-996d-48c8-999d-2ed43387748d\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.386701 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-catalog-content\") pod \"e05cbfa5-996d-48c8-999d-2ed43387748d\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.386752 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-utilities\") pod \"e05cbfa5-996d-48c8-999d-2ed43387748d\" (UID: \"e05cbfa5-996d-48c8-999d-2ed43387748d\") " Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.388355 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-utilities" (OuterVolumeSpecName: "utilities") pod "e05cbfa5-996d-48c8-999d-2ed43387748d" (UID: "e05cbfa5-996d-48c8-999d-2ed43387748d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.401972 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e05cbfa5-996d-48c8-999d-2ed43387748d-kube-api-access-lw8k7" (OuterVolumeSpecName: "kube-api-access-lw8k7") pod "e05cbfa5-996d-48c8-999d-2ed43387748d" (UID: "e05cbfa5-996d-48c8-999d-2ed43387748d"). InnerVolumeSpecName "kube-api-access-lw8k7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.488259 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw8k7\" (UniqueName: \"kubernetes.io/projected/e05cbfa5-996d-48c8-999d-2ed43387748d-kube-api-access-lw8k7\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.488306 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.754212 4812 generic.go:334] "Generic (PLEG): container finished" podID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerID="79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58" exitCode=0 Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.754294 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m82l" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.754310 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m82l" event={"ID":"e05cbfa5-996d-48c8-999d-2ed43387748d","Type":"ContainerDied","Data":"79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58"} Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.754675 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m82l" event={"ID":"e05cbfa5-996d-48c8-999d-2ed43387748d","Type":"ContainerDied","Data":"ced6f9ede7396be462efdeb68ef53b3697cf25d61c22aa7e66ca08051e926533"} Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.754694 4812 scope.go:117] "RemoveContainer" containerID="79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.774927 4812 scope.go:117] "RemoveContainer" containerID="6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.795763 4812 scope.go:117] "RemoveContainer" containerID="942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.837810 4812 scope.go:117] "RemoveContainer" containerID="79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58" Nov 25 17:23:44 crc kubenswrapper[4812]: E1125 17:23:44.838173 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58\": container with ID starting with 79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58 not found: ID does not exist" containerID="79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.838205 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58"} err="failed to get container status \"79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58\": rpc error: code = NotFound desc = could not find container \"79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58\": container with ID starting with 79a50076d3861aba9982ccf4a71146fdd8105d33a79c55e30571e5e52ec73d58 not found: ID does not exist" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.838227 4812 scope.go:117] "RemoveContainer" containerID="6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248" Nov 25 17:23:44 crc kubenswrapper[4812]: E1125 17:23:44.838472 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248\": container with ID starting with 6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248 not found: ID does not exist" containerID="6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.838517 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248"} err="failed to get container status \"6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248\": rpc error: code = NotFound desc = could not find container 
\"6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248\": container with ID starting with 6465cffbbff39cbad455a4d7a39e914aedbe5e5cb46bdd233cb1bbbf98c5b248 not found: ID does not exist" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.838573 4812 scope.go:117] "RemoveContainer" containerID="942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925" Nov 25 17:23:44 crc kubenswrapper[4812]: E1125 17:23:44.838847 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925\": container with ID starting with 942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925 not found: ID does not exist" containerID="942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925" Nov 25 17:23:44 crc kubenswrapper[4812]: I1125 17:23:44.838884 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925"} err="failed to get container status \"942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925\": rpc error: code = NotFound desc = could not find container \"942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925\": container with ID starting with 942589d4c253f4db06c7ef025dbb4e87dc3a2ceb2c0cb2f74c80ec1f257f3925 not found: ID does not exist" Nov 25 17:23:45 crc kubenswrapper[4812]: I1125 17:23:45.795856 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e05cbfa5-996d-48c8-999d-2ed43387748d" (UID: "e05cbfa5-996d-48c8-999d-2ed43387748d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:23:45 crc kubenswrapper[4812]: I1125 17:23:45.811555 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e05cbfa5-996d-48c8-999d-2ed43387748d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:23:45 crc kubenswrapper[4812]: I1125 17:23:45.987129 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5m82l"] Nov 25 17:23:45 crc kubenswrapper[4812]: I1125 17:23:45.998524 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5m82l"] Nov 25 17:23:47 crc kubenswrapper[4812]: I1125 17:23:47.840773 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" path="/var/lib/kubelet/pods/e05cbfa5-996d-48c8-999d-2ed43387748d/volumes" Nov 25 17:24:14 crc kubenswrapper[4812]: I1125 17:24:14.018550 4812 generic.go:334] "Generic (PLEG): container finished" podID="ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" containerID="a67b61cbedc115681f77976a0e5a81af540199afa2cfa4c9239faf77c5eb29e0" exitCode=0 Nov 25 17:24:14 crc kubenswrapper[4812]: I1125 17:24:14.018584 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" event={"ID":"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2","Type":"ContainerDied","Data":"a67b61cbedc115681f77976a0e5a81af540199afa2cfa4c9239faf77c5eb29e0"} Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.448872 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.572450 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ssh-key\") pod \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.572684 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ceph\") pod \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.572731 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpf2c\" (UniqueName: \"kubernetes.io/projected/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-kube-api-access-tpf2c\") pod \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.572847 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-inventory\") pod \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\" (UID: \"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2\") " Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.590666 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ceph" (OuterVolumeSpecName: "ceph") pod "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" (UID: "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.593748 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-kube-api-access-tpf2c" (OuterVolumeSpecName: "kube-api-access-tpf2c") pod "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" (UID: "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2"). InnerVolumeSpecName "kube-api-access-tpf2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.612678 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-inventory" (OuterVolumeSpecName: "inventory") pod "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" (UID: "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.649697 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" (UID: "ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.676623 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.676664 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.676674 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpf2c\" (UniqueName: \"kubernetes.io/projected/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-kube-api-access-tpf2c\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:15 crc kubenswrapper[4812]: I1125 17:24:15.676685 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.042385 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" event={"ID":"ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2","Type":"ContainerDied","Data":"a78776cf914637fd680f8af142dbb1e41d614664babc8e4a94eb520c14f60c72"} Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.042429 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a78776cf914637fd680f8af142dbb1e41d614664babc8e4a94eb520c14f60c72" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.042463 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-xnpgj" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.141915 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fr8lh"] Nov 25 17:24:16 crc kubenswrapper[4812]: E1125 17:24:16.142438 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="registry-server" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.142461 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="registry-server" Nov 25 17:24:16 crc kubenswrapper[4812]: E1125 17:24:16.142486 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.142497 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:16 crc kubenswrapper[4812]: E1125 17:24:16.142520 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="extract-utilities" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.142565 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="extract-utilities" Nov 25 17:24:16 crc kubenswrapper[4812]: E1125 17:24:16.142596 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="extract-content" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 
17:24:16.142603 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="extract-content" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.142837 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7f8e6b-3584-4bc9-a3a9-5d6d60d553a2" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.142906 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e05cbfa5-996d-48c8-999d-2ed43387748d" containerName="registry-server" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.143779 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.146124 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.146562 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.146683 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.146850 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.148147 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.156032 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fr8lh"] Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.187621 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h44h9\" (UniqueName: \"kubernetes.io/projected/3845e5d5-67fa-48d9-94a0-9e23acbbc368-kube-api-access-h44h9\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.187911 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.188155 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ceph\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.188424 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 
crc kubenswrapper[4812]: I1125 17:24:16.290777 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.290913 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ceph\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.291022 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.291061 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h44h9\" (UniqueName: \"kubernetes.io/projected/3845e5d5-67fa-48d9-94a0-9e23acbbc368-kube-api-access-h44h9\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.295046 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.295160 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.295251 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ceph\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.311504 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h44h9\" (UniqueName: \"kubernetes.io/projected/3845e5d5-67fa-48d9-94a0-9e23acbbc368-kube-api-access-h44h9\") pod \"ssh-known-hosts-edpm-deployment-fr8lh\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:16 crc kubenswrapper[4812]: I1125 17:24:16.462017 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:17 crc kubenswrapper[4812]: I1125 17:24:17.039016 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fr8lh"] Nov 25 17:24:17 crc kubenswrapper[4812]: I1125 17:24:17.057795 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" event={"ID":"3845e5d5-67fa-48d9-94a0-9e23acbbc368","Type":"ContainerStarted","Data":"13fd4941deb75672f652971266d874be5b2edea123fce03173a334207ccee226"} Nov 25 17:24:18 crc kubenswrapper[4812]: I1125 17:24:18.069175 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" event={"ID":"3845e5d5-67fa-48d9-94a0-9e23acbbc368","Type":"ContainerStarted","Data":"c133d4ccef5a967122894d5bf3af88e0cd516f0e414b5998d7d98687d23610da"} Nov 25 17:24:18 crc kubenswrapper[4812]: I1125 17:24:18.087958 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" podStartSLOduration=1.6352550259999998 podStartE2EDuration="2.087943045s" podCreationTimestamp="2025-11-25 17:24:16 +0000 UTC" firstStartedPulling="2025-11-25 17:24:17.049006673 +0000 UTC m=+2231.889148778" lastFinishedPulling="2025-11-25 17:24:17.501694702 +0000 UTC m=+2232.341836797" observedRunningTime="2025-11-25 17:24:18.086578539 +0000 UTC m=+2232.926720634" watchObservedRunningTime="2025-11-25 17:24:18.087943045 +0000 UTC m=+2232.928085140" Nov 25 17:24:27 crc kubenswrapper[4812]: I1125 17:24:27.332243 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:24:27 crc kubenswrapper[4812]: I1125 17:24:27.332827 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:24:28 crc kubenswrapper[4812]: I1125 17:24:28.154812 4812 generic.go:334] "Generic (PLEG): container finished" podID="3845e5d5-67fa-48d9-94a0-9e23acbbc368" containerID="c133d4ccef5a967122894d5bf3af88e0cd516f0e414b5998d7d98687d23610da" exitCode=0 Nov 25 17:24:28 crc kubenswrapper[4812]: I1125 17:24:28.154903 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" event={"ID":"3845e5d5-67fa-48d9-94a0-9e23acbbc368","Type":"ContainerDied","Data":"c133d4ccef5a967122894d5bf3af88e0cd516f0e414b5998d7d98687d23610da"} Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.610997 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.726991 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h44h9\" (UniqueName: \"kubernetes.io/projected/3845e5d5-67fa-48d9-94a0-9e23acbbc368-kube-api-access-h44h9\") pod \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.727331 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-inventory-0\") pod \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.727483 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ceph\") pod \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.727671 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ssh-key-openstack-edpm-ipam\") pod \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\" (UID: \"3845e5d5-67fa-48d9-94a0-9e23acbbc368\") " Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.735816 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ceph" (OuterVolumeSpecName: "ceph") pod "3845e5d5-67fa-48d9-94a0-9e23acbbc368" (UID: "3845e5d5-67fa-48d9-94a0-9e23acbbc368"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.738913 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3845e5d5-67fa-48d9-94a0-9e23acbbc368-kube-api-access-h44h9" (OuterVolumeSpecName: "kube-api-access-h44h9") pod "3845e5d5-67fa-48d9-94a0-9e23acbbc368" (UID: "3845e5d5-67fa-48d9-94a0-9e23acbbc368"). InnerVolumeSpecName "kube-api-access-h44h9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.760052 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "3845e5d5-67fa-48d9-94a0-9e23acbbc368" (UID: "3845e5d5-67fa-48d9-94a0-9e23acbbc368"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.772883 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "3845e5d5-67fa-48d9-94a0-9e23acbbc368" (UID: "3845e5d5-67fa-48d9-94a0-9e23acbbc368"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.830411 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h44h9\" (UniqueName: \"kubernetes.io/projected/3845e5d5-67fa-48d9-94a0-9e23acbbc368-kube-api-access-h44h9\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.830457 4812 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.830475 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:29 crc kubenswrapper[4812]: I1125 17:24:29.830488 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3845e5d5-67fa-48d9-94a0-9e23acbbc368-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.173289 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" event={"ID":"3845e5d5-67fa-48d9-94a0-9e23acbbc368","Type":"ContainerDied","Data":"13fd4941deb75672f652971266d874be5b2edea123fce03173a334207ccee226"} Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.173591 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13fd4941deb75672f652971266d874be5b2edea123fce03173a334207ccee226" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.173348 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fr8lh" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.234098 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p"] Nov 25 17:24:30 crc kubenswrapper[4812]: E1125 17:24:30.234765 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3845e5d5-67fa-48d9-94a0-9e23acbbc368" containerName="ssh-known-hosts-edpm-deployment" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.234853 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3845e5d5-67fa-48d9-94a0-9e23acbbc368" containerName="ssh-known-hosts-edpm-deployment" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.235142 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3845e5d5-67fa-48d9-94a0-9e23acbbc368" containerName="ssh-known-hosts-edpm-deployment" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.235919 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.237960 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.238254 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.238406 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.238579 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.238727 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.244297 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p"] Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.338653 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.338714 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.338740 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4zwn\" (UniqueName: \"kubernetes.io/projected/2db73b87-1757-4e9f-a39a-7338c4db6549-kube-api-access-p4zwn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.338796 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.440802 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.440857 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.440882 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4zwn\" (UniqueName: \"kubernetes.io/projected/2db73b87-1757-4e9f-a39a-7338c4db6549-kube-api-access-p4zwn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.440939 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.444864 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.445751 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.447180 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.479946 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4zwn\" (UniqueName: \"kubernetes.io/projected/2db73b87-1757-4e9f-a39a-7338c4db6549-kube-api-access-p4zwn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wrx9p\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:30 crc kubenswrapper[4812]: I1125 17:24:30.612554 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:31 crc kubenswrapper[4812]: I1125 17:24:31.112979 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p"] Nov 25 17:24:31 crc kubenswrapper[4812]: I1125 17:24:31.201021 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" event={"ID":"2db73b87-1757-4e9f-a39a-7338c4db6549","Type":"ContainerStarted","Data":"f325f82bc637272e5d8fcad236e46b83da074ffdc8517c7ed9f84570ecbf257a"} Nov 25 17:24:32 crc kubenswrapper[4812]: I1125 17:24:32.209938 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" event={"ID":"2db73b87-1757-4e9f-a39a-7338c4db6549","Type":"ContainerStarted","Data":"06b15ca842a9f1259a93c2ee32f39c482b6409b680a7dc21dbdfc72cdb8296a4"} Nov 25 17:24:32 crc kubenswrapper[4812]: I1125 17:24:32.246835 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" podStartSLOduration=1.735711851 podStartE2EDuration="2.246814632s" podCreationTimestamp="2025-11-25 17:24:30 +0000 UTC" firstStartedPulling="2025-11-25 17:24:31.136814239 +0000 UTC m=+2245.976956344" lastFinishedPulling="2025-11-25 17:24:31.64791704 +0000 UTC m=+2246.488059125" observedRunningTime="2025-11-25 17:24:32.238974292 +0000 UTC m=+2247.079116387" watchObservedRunningTime="2025-11-25 17:24:32.246814632 +0000 UTC m=+2247.086956727" Nov 25 17:24:40 crc kubenswrapper[4812]: I1125 17:24:40.294829 4812 generic.go:334] "Generic (PLEG): container finished" podID="2db73b87-1757-4e9f-a39a-7338c4db6549" containerID="06b15ca842a9f1259a93c2ee32f39c482b6409b680a7dc21dbdfc72cdb8296a4" exitCode=0 Nov 25 17:24:40 crc kubenswrapper[4812]: I1125 17:24:40.294925 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" event={"ID":"2db73b87-1757-4e9f-a39a-7338c4db6549","Type":"ContainerDied","Data":"06b15ca842a9f1259a93c2ee32f39c482b6409b680a7dc21dbdfc72cdb8296a4"} Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.711693 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.851911 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4zwn\" (UniqueName: \"kubernetes.io/projected/2db73b87-1757-4e9f-a39a-7338c4db6549-kube-api-access-p4zwn\") pod \"2db73b87-1757-4e9f-a39a-7338c4db6549\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.852059 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-inventory\") pod \"2db73b87-1757-4e9f-a39a-7338c4db6549\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.852179 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ssh-key\") pod \"2db73b87-1757-4e9f-a39a-7338c4db6549\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.852234 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ceph\") pod \"2db73b87-1757-4e9f-a39a-7338c4db6549\" (UID: \"2db73b87-1757-4e9f-a39a-7338c4db6549\") " Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.858641 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2db73b87-1757-4e9f-a39a-7338c4db6549-kube-api-access-p4zwn" (OuterVolumeSpecName: "kube-api-access-p4zwn") pod "2db73b87-1757-4e9f-a39a-7338c4db6549" (UID: "2db73b87-1757-4e9f-a39a-7338c4db6549"). InnerVolumeSpecName "kube-api-access-p4zwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.858993 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ceph" (OuterVolumeSpecName: "ceph") pod "2db73b87-1757-4e9f-a39a-7338c4db6549" (UID: "2db73b87-1757-4e9f-a39a-7338c4db6549"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.878822 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2db73b87-1757-4e9f-a39a-7338c4db6549" (UID: "2db73b87-1757-4e9f-a39a-7338c4db6549"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.885084 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-inventory" (OuterVolumeSpecName: "inventory") pod "2db73b87-1757-4e9f-a39a-7338c4db6549" (UID: "2db73b87-1757-4e9f-a39a-7338c4db6549"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.955675 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.955707 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.955717 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4zwn\" (UniqueName: \"kubernetes.io/projected/2db73b87-1757-4e9f-a39a-7338c4db6549-kube-api-access-p4zwn\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:41 crc kubenswrapper[4812]: I1125 17:24:41.955729 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2db73b87-1757-4e9f-a39a-7338c4db6549-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.313124 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" event={"ID":"2db73b87-1757-4e9f-a39a-7338c4db6549","Type":"ContainerDied","Data":"f325f82bc637272e5d8fcad236e46b83da074ffdc8517c7ed9f84570ecbf257a"} Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.313425 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f325f82bc637272e5d8fcad236e46b83da074ffdc8517c7ed9f84570ecbf257a" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.313177 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wrx9p" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.401681 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf"] Nov 25 17:24:42 crc kubenswrapper[4812]: E1125 17:24:42.402159 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2db73b87-1757-4e9f-a39a-7338c4db6549" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.402183 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2db73b87-1757-4e9f-a39a-7338c4db6549" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.402398 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2db73b87-1757-4e9f-a39a-7338c4db6549" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.403393 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.405869 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.406033 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.406095 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.406322 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.407280 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.414699 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf"] Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.468307 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.468385 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.468443 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.468482 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2rzz\" (UniqueName: \"kubernetes.io/projected/c430c12d-7496-4f20-8fa3-bd9ff44844e0-kube-api-access-v2rzz\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.569825 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.569915 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.569978 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.570031 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2rzz\" (UniqueName: \"kubernetes.io/projected/c430c12d-7496-4f20-8fa3-bd9ff44844e0-kube-api-access-v2rzz\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.574942 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.575086 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.576846 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.586803 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2rzz\" (UniqueName: \"kubernetes.io/projected/c430c12d-7496-4f20-8fa3-bd9ff44844e0-kube-api-access-v2rzz\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:42 crc kubenswrapper[4812]: I1125 17:24:42.737600 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:43 crc kubenswrapper[4812]: I1125 17:24:43.321487 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf"] Nov 25 17:24:44 crc kubenswrapper[4812]: I1125 17:24:44.336067 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" event={"ID":"c430c12d-7496-4f20-8fa3-bd9ff44844e0","Type":"ContainerStarted","Data":"506dd9e96778b0a4a561dd4a18722d4107d2eab14522033c06031eb4b96eb128"} Nov 25 17:24:44 crc kubenswrapper[4812]: I1125 17:24:44.336487 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" event={"ID":"c430c12d-7496-4f20-8fa3-bd9ff44844e0","Type":"ContainerStarted","Data":"1027957682ffbe5f770111794666df80830705f737e7b11a25181a80617c504d"} Nov 25 17:24:44 crc kubenswrapper[4812]: I1125 17:24:44.367706 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" podStartSLOduration=1.713252161 podStartE2EDuration="2.367690778s" podCreationTimestamp="2025-11-25 17:24:42 +0000 UTC" firstStartedPulling="2025-11-25 17:24:43.325675853 +0000 UTC m=+2258.165817948" lastFinishedPulling="2025-11-25 17:24:43.98011447 +0000 UTC m=+2258.820256565" observedRunningTime="2025-11-25 17:24:44.361753808 +0000 UTC m=+2259.201895903" watchObservedRunningTime="2025-11-25 17:24:44.367690778 +0000 UTC m=+2259.207832873" Nov 25 17:24:46 crc kubenswrapper[4812]: I1125 17:24:46.861042 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dfrdw"] Nov 25 17:24:46 crc kubenswrapper[4812]: I1125 17:24:46.864732 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:46 crc kubenswrapper[4812]: I1125 17:24:46.907690 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dfrdw"] Nov 25 17:24:46 crc kubenswrapper[4812]: I1125 17:24:46.948012 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-catalog-content\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:46 crc kubenswrapper[4812]: I1125 17:24:46.948091 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmng2\" (UniqueName: \"kubernetes.io/projected/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-kube-api-access-jmng2\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:46 crc kubenswrapper[4812]: I1125 17:24:46.948137 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-utilities\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.049648 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-catalog-content\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.049896 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmng2\" (UniqueName: \"kubernetes.io/projected/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-kube-api-access-jmng2\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.050037 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-utilities\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.050270 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-catalog-content\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.050519 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-utilities\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.072628 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jmng2\" (UniqueName: \"kubernetes.io/projected/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-kube-api-access-jmng2\") pod \"community-operators-dfrdw\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.190563 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:47 crc kubenswrapper[4812]: I1125 17:24:47.746135 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dfrdw"] Nov 25 17:24:47 crc kubenswrapper[4812]: W1125 17:24:47.753367 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc05d7dbc_3d2f_40c2_b9e3_c4e137bb0867.slice/crio-c1b45850fce46e8d19405e644a1d445da47f925d268676f06fd98e214df19b1b WatchSource:0}: Error finding container c1b45850fce46e8d19405e644a1d445da47f925d268676f06fd98e214df19b1b: Status 404 returned error can't find the container with id c1b45850fce46e8d19405e644a1d445da47f925d268676f06fd98e214df19b1b Nov 25 17:24:48 crc kubenswrapper[4812]: I1125 17:24:48.381071 4812 generic.go:334] "Generic (PLEG): container finished" podID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerID="9b3ae785843d9b3c1948e001e21b5f014a555f6915d837777b8b0f98bcd56339" exitCode=0 Nov 25 17:24:48 crc kubenswrapper[4812]: I1125 17:24:48.381166 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerDied","Data":"9b3ae785843d9b3c1948e001e21b5f014a555f6915d837777b8b0f98bcd56339"} Nov 25 17:24:48 crc kubenswrapper[4812]: I1125 17:24:48.381465 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerStarted","Data":"c1b45850fce46e8d19405e644a1d445da47f925d268676f06fd98e214df19b1b"} Nov 25 17:24:49 crc kubenswrapper[4812]: I1125 17:24:49.396087 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerStarted","Data":"634fb7fe22c335c8c827de911ee81b85151ccca9cf83426dbeed6250432a3d1a"} Nov 25 17:24:50 crc kubenswrapper[4812]: I1125 17:24:50.411920 4812 generic.go:334] "Generic (PLEG): container finished" podID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerID="634fb7fe22c335c8c827de911ee81b85151ccca9cf83426dbeed6250432a3d1a" exitCode=0 Nov 25 17:24:50 crc kubenswrapper[4812]: I1125 17:24:50.412069 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerDied","Data":"634fb7fe22c335c8c827de911ee81b85151ccca9cf83426dbeed6250432a3d1a"} Nov 25 17:24:51 crc kubenswrapper[4812]: I1125 17:24:51.424967 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerStarted","Data":"b3355e4fbfb64260b06c6668ec60fcef7510c0daa7f3154f6b4b5e193ebd7602"} Nov 25 17:24:51 crc kubenswrapper[4812]: I1125 17:24:51.451611 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dfrdw" 
podStartSLOduration=2.910704206 podStartE2EDuration="5.451571075s" podCreationTimestamp="2025-11-25 17:24:46 +0000 UTC" firstStartedPulling="2025-11-25 17:24:48.383900842 +0000 UTC m=+2263.224042937" lastFinishedPulling="2025-11-25 17:24:50.924767711 +0000 UTC m=+2265.764909806" observedRunningTime="2025-11-25 17:24:51.44319939 +0000 UTC m=+2266.283341505" watchObservedRunningTime="2025-11-25 17:24:51.451571075 +0000 UTC m=+2266.291713210" Nov 25 17:24:54 crc kubenswrapper[4812]: I1125 17:24:54.457304 4812 generic.go:334] "Generic (PLEG): container finished" podID="c430c12d-7496-4f20-8fa3-bd9ff44844e0" containerID="506dd9e96778b0a4a561dd4a18722d4107d2eab14522033c06031eb4b96eb128" exitCode=0 Nov 25 17:24:54 crc kubenswrapper[4812]: I1125 17:24:54.457377 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" event={"ID":"c430c12d-7496-4f20-8fa3-bd9ff44844e0","Type":"ContainerDied","Data":"506dd9e96778b0a4a561dd4a18722d4107d2eab14522033c06031eb4b96eb128"} Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.876760 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.904486 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ceph\") pod \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.904567 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ssh-key\") pod \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.904616 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-inventory\") pod \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.904669 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2rzz\" (UniqueName: \"kubernetes.io/projected/c430c12d-7496-4f20-8fa3-bd9ff44844e0-kube-api-access-v2rzz\") pod \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\" (UID: \"c430c12d-7496-4f20-8fa3-bd9ff44844e0\") " Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.911558 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ceph" (OuterVolumeSpecName: "ceph") pod "c430c12d-7496-4f20-8fa3-bd9ff44844e0" (UID: "c430c12d-7496-4f20-8fa3-bd9ff44844e0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.912489 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c430c12d-7496-4f20-8fa3-bd9ff44844e0-kube-api-access-v2rzz" (OuterVolumeSpecName: "kube-api-access-v2rzz") pod "c430c12d-7496-4f20-8fa3-bd9ff44844e0" (UID: "c430c12d-7496-4f20-8fa3-bd9ff44844e0"). InnerVolumeSpecName "kube-api-access-v2rzz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.934163 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c430c12d-7496-4f20-8fa3-bd9ff44844e0" (UID: "c430c12d-7496-4f20-8fa3-bd9ff44844e0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:55 crc kubenswrapper[4812]: I1125 17:24:55.934402 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-inventory" (OuterVolumeSpecName: "inventory") pod "c430c12d-7496-4f20-8fa3-bd9ff44844e0" (UID: "c430c12d-7496-4f20-8fa3-bd9ff44844e0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.006797 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.006838 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.006848 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c430c12d-7496-4f20-8fa3-bd9ff44844e0-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.006860 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2rzz\" (UniqueName: \"kubernetes.io/projected/c430c12d-7496-4f20-8fa3-bd9ff44844e0-kube-api-access-v2rzz\") on node \"crc\" DevicePath \"\"" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.474163 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" event={"ID":"c430c12d-7496-4f20-8fa3-bd9ff44844e0","Type":"ContainerDied","Data":"1027957682ffbe5f770111794666df80830705f737e7b11a25181a80617c504d"} Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.474206 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1027957682ffbe5f770111794666df80830705f737e7b11a25181a80617c504d" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.474268 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-h46rf" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.558985 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt"] Nov 25 17:24:56 crc kubenswrapper[4812]: E1125 17:24:56.559665 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c430c12d-7496-4f20-8fa3-bd9ff44844e0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.559681 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c430c12d-7496-4f20-8fa3-bd9ff44844e0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.559883 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c430c12d-7496-4f20-8fa3-bd9ff44844e0" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.560451 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.565171 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.565385 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.565767 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.565940 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.566075 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.565964 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.566267 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.566300 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.572812 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt"] Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617645 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617718 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617780 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617815 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617863 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617886 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617908 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617935 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617952 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-bootstrap-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.617998 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.618022 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.618085 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cpnd\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-kube-api-access-5cpnd\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.618106 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.719903 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.719966 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720015 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720042 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720067 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720085 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720102 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720141 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720167 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720207 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cpnd\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-kube-api-access-5cpnd\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720229 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720258 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.720284 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.726636 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.726645 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.726744 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.727644 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.727692 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.728183 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.728259 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.728583 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.728698 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.729129 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.729740 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.730093 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.736663 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cpnd\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-kube-api-access-5cpnd\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:56 crc kubenswrapper[4812]: I1125 17:24:56.884088 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.190867 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.191171 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.237481 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.332179 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.332228 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.569237 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt"] Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.569908 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:24:57 crc kubenswrapper[4812]: I1125 17:24:57.619739 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dfrdw"] Nov 25 17:24:58 crc kubenswrapper[4812]: I1125 17:24:58.489952 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" event={"ID":"496df5f9-b0c3-4fa4-ac3e-60ec5a986044","Type":"ContainerStarted","Data":"1f5cf44864e30f5ae8f0aca773af37d9a299e1a62849dec15cee3102a942c926"} Nov 25 17:24:58 crc kubenswrapper[4812]: I1125 17:24:58.490279 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" event={"ID":"496df5f9-b0c3-4fa4-ac3e-60ec5a986044","Type":"ContainerStarted","Data":"38fc0e2e826f1ec59f9558bc86f387d4d67735bca394fc4f28127afe1acee0d6"} Nov 25 17:24:58 crc kubenswrapper[4812]: I1125 17:24:58.514207 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" podStartSLOduration=2.074573704 podStartE2EDuration="2.514183331s" podCreationTimestamp="2025-11-25 17:24:56 +0000 UTC" firstStartedPulling="2025-11-25 17:24:57.5477493 +0000 UTC m=+2272.387891395" lastFinishedPulling="2025-11-25 17:24:57.987358927 +0000 UTC m=+2272.827501022" observedRunningTime="2025-11-25 17:24:58.512063973 +0000 UTC m=+2273.352206068" watchObservedRunningTime="2025-11-25 17:24:58.514183331 +0000 UTC m=+2273.354325426" Nov 25 17:24:59 crc kubenswrapper[4812]: I1125 17:24:59.497159 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dfrdw" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" 
containerName="registry-server" containerID="cri-o://b3355e4fbfb64260b06c6668ec60fcef7510c0daa7f3154f6b4b5e193ebd7602" gracePeriod=2 Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.506697 4812 generic.go:334] "Generic (PLEG): container finished" podID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerID="b3355e4fbfb64260b06c6668ec60fcef7510c0daa7f3154f6b4b5e193ebd7602" exitCode=0 Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.506923 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerDied","Data":"b3355e4fbfb64260b06c6668ec60fcef7510c0daa7f3154f6b4b5e193ebd7602"} Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.507063 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfrdw" event={"ID":"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867","Type":"ContainerDied","Data":"c1b45850fce46e8d19405e644a1d445da47f925d268676f06fd98e214df19b1b"} Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.507092 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1b45850fce46e8d19405e644a1d445da47f925d268676f06fd98e214df19b1b" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.522439 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.614952 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-utilities\") pod \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.615136 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-catalog-content\") pod \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.615276 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmng2\" (UniqueName: \"kubernetes.io/projected/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-kube-api-access-jmng2\") pod \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\" (UID: \"c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867\") " Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.615754 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-utilities" (OuterVolumeSpecName: "utilities") pod "c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" (UID: "c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.626809 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-kube-api-access-jmng2" (OuterVolumeSpecName: "kube-api-access-jmng2") pod "c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" (UID: "c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867"). InnerVolumeSpecName "kube-api-access-jmng2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.659919 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" (UID: "c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.717381 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.717701 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:00 crc kubenswrapper[4812]: I1125 17:25:00.717712 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmng2\" (UniqueName: \"kubernetes.io/projected/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867-kube-api-access-jmng2\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:01 crc kubenswrapper[4812]: I1125 17:25:01.518061 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfrdw" Nov 25 17:25:01 crc kubenswrapper[4812]: I1125 17:25:01.579827 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dfrdw"] Nov 25 17:25:01 crc kubenswrapper[4812]: I1125 17:25:01.591363 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dfrdw"] Nov 25 17:25:01 crc kubenswrapper[4812]: I1125 17:25:01.842057 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" path="/var/lib/kubelet/pods/c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867/volumes" Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.333455 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.334171 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.334239 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.335159 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 
17:25:27.335256 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" gracePeriod=600 Nov 25 17:25:27 crc kubenswrapper[4812]: E1125 17:25:27.460021 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.777324 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" exitCode=0 Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.777391 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af"} Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.777453 4812 scope.go:117] "RemoveContainer" containerID="1ce2565d170db8a46eee60e79be5aef9c670f9e94c93580922535f0b2f9ddf57" Nov 25 17:25:27 crc kubenswrapper[4812]: I1125 17:25:27.779310 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:25:27 crc kubenswrapper[4812]: E1125 17:25:27.781692 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:25:30 crc kubenswrapper[4812]: I1125 17:25:30.808376 4812 generic.go:334] "Generic (PLEG): container finished" podID="496df5f9-b0c3-4fa4-ac3e-60ec5a986044" containerID="1f5cf44864e30f5ae8f0aca773af37d9a299e1a62849dec15cee3102a942c926" exitCode=0 Nov 25 17:25:30 crc kubenswrapper[4812]: I1125 17:25:30.808462 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" event={"ID":"496df5f9-b0c3-4fa4-ac3e-60ec5a986044","Type":"ContainerDied","Data":"1f5cf44864e30f5ae8f0aca773af37d9a299e1a62849dec15cee3102a942c926"} Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.270907 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.404313 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ceph\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.404748 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-inventory\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.404875 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ovn-combined-ca-bundle\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405025 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-neutron-metadata-combined-ca-bundle\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405126 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-ovn-default-certs-0\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405235 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-repo-setup-combined-ca-bundle\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405342 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-nova-combined-ca-bundle\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405411 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405486 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc 
kubenswrapper[4812]: I1125 17:25:32.405600 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-libvirt-combined-ca-bundle\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405702 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-bootstrap-combined-ca-bundle\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405802 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ssh-key\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.405886 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cpnd\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-kube-api-access-5cpnd\") pod \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\" (UID: \"496df5f9-b0c3-4fa4-ac3e-60ec5a986044\") " Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.411242 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.411880 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ceph" (OuterVolumeSpecName: "ceph") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.411947 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.411974 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "nova-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.412056 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.412313 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.412582 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.412614 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.412961 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-kube-api-access-5cpnd" (OuterVolumeSpecName: "kube-api-access-5cpnd") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "kube-api-access-5cpnd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.413105 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.413137 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.432876 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.433289 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-inventory" (OuterVolumeSpecName: "inventory") pod "496df5f9-b0c3-4fa4-ac3e-60ec5a986044" (UID: "496df5f9-b0c3-4fa4-ac3e-60ec5a986044"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508303 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508345 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508360 4812 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508374 4812 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508384 4812 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508393 4812 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508403 4812 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508411 4812 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508423 4812 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 
17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508434 4812 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508442 4812 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508451 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.508459 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cpnd\" (UniqueName: \"kubernetes.io/projected/496df5f9-b0c3-4fa4-ac3e-60ec5a986044-kube-api-access-5cpnd\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.874321 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" event={"ID":"496df5f9-b0c3-4fa4-ac3e-60ec5a986044","Type":"ContainerDied","Data":"38fc0e2e826f1ec59f9558bc86f387d4d67735bca394fc4f28127afe1acee0d6"} Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.874359 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38fc0e2e826f1ec59f9558bc86f387d4d67735bca394fc4f28127afe1acee0d6" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.874420 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-hh2gt" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.926542 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4"] Nov 25 17:25:32 crc kubenswrapper[4812]: E1125 17:25:32.927021 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="496df5f9-b0c3-4fa4-ac3e-60ec5a986044" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.927044 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="496df5f9-b0c3-4fa4-ac3e-60ec5a986044" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 17:25:32 crc kubenswrapper[4812]: E1125 17:25:32.927099 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="registry-server" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.927108 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="registry-server" Nov 25 17:25:32 crc kubenswrapper[4812]: E1125 17:25:32.927126 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="extract-content" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.927135 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="extract-content" Nov 25 17:25:32 crc kubenswrapper[4812]: E1125 17:25:32.927149 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="extract-utilities" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.927158 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="extract-utilities" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.928678 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c05d7dbc-3d2f-40c2-b9e3-c4e137bb0867" containerName="registry-server" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.928714 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="496df5f9-b0c3-4fa4-ac3e-60ec5a986044" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.929621 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.932848 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.933061 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.933155 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.934076 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.936495 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:25:32 crc kubenswrapper[4812]: I1125 17:25:32.941741 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4"] Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.021052 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.021110 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxknt\" (UniqueName: \"kubernetes.io/projected/ceae780c-dc51-4dcd-82d0-d97c65929413-kube-api-access-fxknt\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.021300 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.021407 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.123953 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.124036 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxknt\" 
(UniqueName: \"kubernetes.io/projected/ceae780c-dc51-4dcd-82d0-d97c65929413-kube-api-access-fxknt\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.124132 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.124170 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.128267 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.128854 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.129588 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.143452 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxknt\" (UniqueName: \"kubernetes.io/projected/ceae780c-dc51-4dcd-82d0-d97c65929413-kube-api-access-fxknt\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.254113 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.738463 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4"] Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.741932 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:25:33 crc kubenswrapper[4812]: I1125 17:25:33.884942 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" event={"ID":"ceae780c-dc51-4dcd-82d0-d97c65929413","Type":"ContainerStarted","Data":"4e4e38112668303c1fd0f0a2f442c88f4c25a7c0a111198c4253cb9ddf9b60c1"} Nov 25 17:25:34 crc kubenswrapper[4812]: I1125 17:25:34.894899 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" event={"ID":"ceae780c-dc51-4dcd-82d0-d97c65929413","Type":"ContainerStarted","Data":"db0b022f7d2e9bec3e2d6aabc6541456ebe10dd664ec8548c87cf22e3575c74b"} Nov 25 17:25:34 crc kubenswrapper[4812]: I1125 17:25:34.909515 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" podStartSLOduration=2.4025187949999998 podStartE2EDuration="2.909491585s" podCreationTimestamp="2025-11-25 17:25:32 +0000 UTC" firstStartedPulling="2025-11-25 17:25:33.741672655 +0000 UTC m=+2308.581814750" lastFinishedPulling="2025-11-25 17:25:34.248645445 +0000 UTC m=+2309.088787540" observedRunningTime="2025-11-25 17:25:34.908051996 +0000 UTC m=+2309.748194091" watchObservedRunningTime="2025-11-25 17:25:34.909491585 +0000 UTC m=+2309.749633690" Nov 25 17:25:39 crc kubenswrapper[4812]: I1125 17:25:39.936582 4812 generic.go:334] "Generic (PLEG): container finished" podID="ceae780c-dc51-4dcd-82d0-d97c65929413" containerID="db0b022f7d2e9bec3e2d6aabc6541456ebe10dd664ec8548c87cf22e3575c74b" exitCode=0 Nov 25 17:25:39 crc kubenswrapper[4812]: I1125 17:25:39.936703 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" event={"ID":"ceae780c-dc51-4dcd-82d0-d97c65929413","Type":"ContainerDied","Data":"db0b022f7d2e9bec3e2d6aabc6541456ebe10dd664ec8548c87cf22e3575c74b"} Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.377351 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.472251 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ssh-key\") pod \"ceae780c-dc51-4dcd-82d0-d97c65929413\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.472315 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxknt\" (UniqueName: \"kubernetes.io/projected/ceae780c-dc51-4dcd-82d0-d97c65929413-kube-api-access-fxknt\") pod \"ceae780c-dc51-4dcd-82d0-d97c65929413\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.472340 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-inventory\") pod \"ceae780c-dc51-4dcd-82d0-d97c65929413\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.472584 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ceph\") pod \"ceae780c-dc51-4dcd-82d0-d97c65929413\" (UID: \"ceae780c-dc51-4dcd-82d0-d97c65929413\") " Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.477283 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceae780c-dc51-4dcd-82d0-d97c65929413-kube-api-access-fxknt" (OuterVolumeSpecName: "kube-api-access-fxknt") pod "ceae780c-dc51-4dcd-82d0-d97c65929413" (UID: "ceae780c-dc51-4dcd-82d0-d97c65929413"). InnerVolumeSpecName "kube-api-access-fxknt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.477834 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ceph" (OuterVolumeSpecName: "ceph") pod "ceae780c-dc51-4dcd-82d0-d97c65929413" (UID: "ceae780c-dc51-4dcd-82d0-d97c65929413"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.499399 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ceae780c-dc51-4dcd-82d0-d97c65929413" (UID: "ceae780c-dc51-4dcd-82d0-d97c65929413"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.507641 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-inventory" (OuterVolumeSpecName: "inventory") pod "ceae780c-dc51-4dcd-82d0-d97c65929413" (UID: "ceae780c-dc51-4dcd-82d0-d97c65929413"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.573931 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxknt\" (UniqueName: \"kubernetes.io/projected/ceae780c-dc51-4dcd-82d0-d97c65929413-kube-api-access-fxknt\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.573961 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.573970 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.573978 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ceae780c-dc51-4dcd-82d0-d97c65929413-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.831581 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:25:41 crc kubenswrapper[4812]: E1125 17:25:41.831947 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.957817 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" event={"ID":"ceae780c-dc51-4dcd-82d0-d97c65929413","Type":"ContainerDied","Data":"4e4e38112668303c1fd0f0a2f442c88f4c25a7c0a111198c4253cb9ddf9b60c1"} Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.957864 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e4e38112668303c1fd0f0a2f442c88f4c25a7c0a111198c4253cb9ddf9b60c1" Nov 25 17:25:41 crc kubenswrapper[4812]: I1125 17:25:41.957897 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-wzrc4" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.045935 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5"] Nov 25 17:25:42 crc kubenswrapper[4812]: E1125 17:25:42.046687 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceae780c-dc51-4dcd-82d0-d97c65929413" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.046711 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceae780c-dc51-4dcd-82d0-d97c65929413" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.046929 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceae780c-dc51-4dcd-82d0-d97c65929413" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.047583 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.051378 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.051415 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.051427 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.051589 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.051919 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.052405 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.057664 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5"] Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.184116 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.184193 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.184408 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.184557 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/38070011-6cb9-48ac-9b5d-5a43e0120187-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.184678 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg6lc\" (UniqueName: \"kubernetes.io/projected/38070011-6cb9-48ac-9b5d-5a43e0120187-kube-api-access-kg6lc\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc 
kubenswrapper[4812]: I1125 17:25:42.184796 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.286775 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.286879 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.286917 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/38070011-6cb9-48ac-9b5d-5a43e0120187-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.286953 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg6lc\" (UniqueName: \"kubernetes.io/projected/38070011-6cb9-48ac-9b5d-5a43e0120187-kube-api-access-kg6lc\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.286995 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.287027 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.288015 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/38070011-6cb9-48ac-9b5d-5a43e0120187-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.291592 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.292387 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.293230 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.296498 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.309385 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg6lc\" (UniqueName: \"kubernetes.io/projected/38070011-6cb9-48ac-9b5d-5a43e0120187-kube-api-access-kg6lc\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-6hkx5\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.373078 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.891460 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5"] Nov 25 17:25:42 crc kubenswrapper[4812]: I1125 17:25:42.965662 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" event={"ID":"38070011-6cb9-48ac-9b5d-5a43e0120187","Type":"ContainerStarted","Data":"e7737595993f9c6afebba1e714b60e44e8f32a3ae6b7e8375e64df972abb160d"} Nov 25 17:25:43 crc kubenswrapper[4812]: I1125 17:25:43.975040 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" event={"ID":"38070011-6cb9-48ac-9b5d-5a43e0120187","Type":"ContainerStarted","Data":"b097e295eb9cb2f9a76decffcec70fd9915e701fff935d047ffa1d8b380d0dd6"} Nov 25 17:25:44 crc kubenswrapper[4812]: I1125 17:25:44.002378 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" podStartSLOduration=1.252854169 podStartE2EDuration="2.002348393s" podCreationTimestamp="2025-11-25 17:25:42 +0000 UTC" firstStartedPulling="2025-11-25 17:25:42.897238001 +0000 UTC m=+2317.737380106" lastFinishedPulling="2025-11-25 17:25:43.646732195 +0000 UTC m=+2318.486874330" observedRunningTime="2025-11-25 17:25:43.996160607 +0000 UTC m=+2318.836302712" watchObservedRunningTime="2025-11-25 17:25:44.002348393 +0000 UTC m=+2318.842490498" Nov 25 17:25:55 crc kubenswrapper[4812]: I1125 17:25:55.838110 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:25:55 crc kubenswrapper[4812]: E1125 17:25:55.839035 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:26:08 crc kubenswrapper[4812]: I1125 17:26:08.831915 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:26:08 crc kubenswrapper[4812]: E1125 17:26:08.832902 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:26:20 crc kubenswrapper[4812]: I1125 17:26:20.832641 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:26:20 crc kubenswrapper[4812]: E1125 17:26:20.834140 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" 
podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:26:33 crc kubenswrapper[4812]: I1125 17:26:33.832451 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:26:33 crc kubenswrapper[4812]: E1125 17:26:33.833306 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:26:46 crc kubenswrapper[4812]: I1125 17:26:46.831136 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:26:46 crc kubenswrapper[4812]: E1125 17:26:46.831725 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:26:52 crc kubenswrapper[4812]: I1125 17:26:52.602233 4812 generic.go:334] "Generic (PLEG): container finished" podID="38070011-6cb9-48ac-9b5d-5a43e0120187" containerID="b097e295eb9cb2f9a76decffcec70fd9915e701fff935d047ffa1d8b380d0dd6" exitCode=0 Nov 25 17:26:52 crc kubenswrapper[4812]: I1125 17:26:52.602314 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" event={"ID":"38070011-6cb9-48ac-9b5d-5a43e0120187","Type":"ContainerDied","Data":"b097e295eb9cb2f9a76decffcec70fd9915e701fff935d047ffa1d8b380d0dd6"} Nov 25 17:26:53 crc kubenswrapper[4812]: I1125 17:26:53.996196 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.071721 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kg6lc\" (UniqueName: \"kubernetes.io/projected/38070011-6cb9-48ac-9b5d-5a43e0120187-kube-api-access-kg6lc\") pod \"38070011-6cb9-48ac-9b5d-5a43e0120187\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.071785 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ceph\") pod \"38070011-6cb9-48ac-9b5d-5a43e0120187\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.071806 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-inventory\") pod \"38070011-6cb9-48ac-9b5d-5a43e0120187\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.071834 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/38070011-6cb9-48ac-9b5d-5a43e0120187-ovncontroller-config-0\") pod \"38070011-6cb9-48ac-9b5d-5a43e0120187\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.071891 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ovn-combined-ca-bundle\") pod \"38070011-6cb9-48ac-9b5d-5a43e0120187\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.071972 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ssh-key\") pod \"38070011-6cb9-48ac-9b5d-5a43e0120187\" (UID: \"38070011-6cb9-48ac-9b5d-5a43e0120187\") " Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.077254 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38070011-6cb9-48ac-9b5d-5a43e0120187-kube-api-access-kg6lc" (OuterVolumeSpecName: "kube-api-access-kg6lc") pod "38070011-6cb9-48ac-9b5d-5a43e0120187" (UID: "38070011-6cb9-48ac-9b5d-5a43e0120187"). InnerVolumeSpecName "kube-api-access-kg6lc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.077366 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "38070011-6cb9-48ac-9b5d-5a43e0120187" (UID: "38070011-6cb9-48ac-9b5d-5a43e0120187"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.078758 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ceph" (OuterVolumeSpecName: "ceph") pod "38070011-6cb9-48ac-9b5d-5a43e0120187" (UID: "38070011-6cb9-48ac-9b5d-5a43e0120187"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.098982 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38070011-6cb9-48ac-9b5d-5a43e0120187-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "38070011-6cb9-48ac-9b5d-5a43e0120187" (UID: "38070011-6cb9-48ac-9b5d-5a43e0120187"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.105968 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-inventory" (OuterVolumeSpecName: "inventory") pod "38070011-6cb9-48ac-9b5d-5a43e0120187" (UID: "38070011-6cb9-48ac-9b5d-5a43e0120187"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.105962 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "38070011-6cb9-48ac-9b5d-5a43e0120187" (UID: "38070011-6cb9-48ac-9b5d-5a43e0120187"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.175785 4812 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.175821 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.175830 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kg6lc\" (UniqueName: \"kubernetes.io/projected/38070011-6cb9-48ac-9b5d-5a43e0120187-kube-api-access-kg6lc\") on node \"crc\" DevicePath \"\"" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.175841 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.175849 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38070011-6cb9-48ac-9b5d-5a43e0120187-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.175859 4812 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/38070011-6cb9-48ac-9b5d-5a43e0120187-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.623104 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" event={"ID":"38070011-6cb9-48ac-9b5d-5a43e0120187","Type":"ContainerDied","Data":"e7737595993f9c6afebba1e714b60e44e8f32a3ae6b7e8375e64df972abb160d"} Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.623160 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-6hkx5" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.623179 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7737595993f9c6afebba1e714b60e44e8f32a3ae6b7e8375e64df972abb160d" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.726143 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm"] Nov 25 17:26:54 crc kubenswrapper[4812]: E1125 17:26:54.726468 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38070011-6cb9-48ac-9b5d-5a43e0120187" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.726486 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="38070011-6cb9-48ac-9b5d-5a43e0120187" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.726694 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="38070011-6cb9-48ac-9b5d-5a43e0120187" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.727271 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.730348 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.730521 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.730766 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.732328 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.732603 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.732688 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.733201 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.751295 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm"] Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.887610 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.887670 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.887800 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.887999 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.888054 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.888211 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxtxn\" (UniqueName: \"kubernetes.io/projected/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-kube-api-access-qxtxn\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.888375 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.990678 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.991038 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.991078 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.991244 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.991283 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.991336 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxtxn\" (UniqueName: \"kubernetes.io/projected/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-kube-api-access-qxtxn\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.991406 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.996296 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.996309 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.996986 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-nova-metadata-neutron-config-0\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.997120 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.998262 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:54 crc kubenswrapper[4812]: I1125 17:26:54.998987 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:55 crc kubenswrapper[4812]: I1125 17:26:55.016484 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxtxn\" (UniqueName: \"kubernetes.io/projected/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-kube-api-access-qxtxn\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:55 crc kubenswrapper[4812]: I1125 17:26:55.050353 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:26:55 crc kubenswrapper[4812]: I1125 17:26:55.609506 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm"] Nov 25 17:26:55 crc kubenswrapper[4812]: I1125 17:26:55.632257 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" event={"ID":"09c619e9-dd51-4931-bb0f-1f8c11d9e55d","Type":"ContainerStarted","Data":"653a3b7b295f918b9008f259a680c97dd4d8b5ed33fb77b454e906fc6ed7ff46"} Nov 25 17:26:56 crc kubenswrapper[4812]: I1125 17:26:56.642891 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" event={"ID":"09c619e9-dd51-4931-bb0f-1f8c11d9e55d","Type":"ContainerStarted","Data":"b981df8571850271c37cf117500b8b76153be3d424de7490b97a738679d0a88a"} Nov 25 17:26:56 crc kubenswrapper[4812]: I1125 17:26:56.662174 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" podStartSLOduration=2.113121668 podStartE2EDuration="2.662157639s" podCreationTimestamp="2025-11-25 17:26:54 +0000 UTC" firstStartedPulling="2025-11-25 17:26:55.605657345 +0000 UTC m=+2390.445799440" lastFinishedPulling="2025-11-25 17:26:56.154693276 +0000 UTC m=+2390.994835411" observedRunningTime="2025-11-25 17:26:56.658995104 +0000 UTC m=+2391.499137199" watchObservedRunningTime="2025-11-25 17:26:56.662157639 +0000 UTC m=+2391.502299734" Nov 25 17:27:00 crc kubenswrapper[4812]: I1125 17:27:00.832417 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:27:00 crc kubenswrapper[4812]: E1125 17:27:00.833174 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:27:11 crc kubenswrapper[4812]: I1125 17:27:11.831586 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:27:11 crc kubenswrapper[4812]: E1125 17:27:11.832286 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:27:26 crc kubenswrapper[4812]: I1125 17:27:26.831601 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:27:26 crc kubenswrapper[4812]: E1125 17:27:26.832289 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:27:41 crc kubenswrapper[4812]: I1125 17:27:41.831261 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:27:41 crc kubenswrapper[4812]: E1125 17:27:41.832042 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:27:52 crc kubenswrapper[4812]: I1125 17:27:52.831507 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:27:52 crc kubenswrapper[4812]: E1125 17:27:52.834019 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:28:03 crc kubenswrapper[4812]: I1125 17:28:03.206403 4812 generic.go:334] "Generic (PLEG): container finished" podID="09c619e9-dd51-4931-bb0f-1f8c11d9e55d" containerID="b981df8571850271c37cf117500b8b76153be3d424de7490b97a738679d0a88a" exitCode=0 Nov 25 17:28:03 crc kubenswrapper[4812]: I1125 17:28:03.206476 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" event={"ID":"09c619e9-dd51-4931-bb0f-1f8c11d9e55d","Type":"ContainerDied","Data":"b981df8571850271c37cf117500b8b76153be3d424de7490b97a738679d0a88a"} Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.630556 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829129 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-nova-metadata-neutron-config-0\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829209 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxtxn\" (UniqueName: \"kubernetes.io/projected/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-kube-api-access-qxtxn\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829249 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-inventory\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829285 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ceph\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829360 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-metadata-combined-ca-bundle\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829493 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ssh-key\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.829619 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-ovn-metadata-agent-neutron-config-0\") pod \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\" (UID: \"09c619e9-dd51-4931-bb0f-1f8c11d9e55d\") " Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.835611 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ceph" (OuterVolumeSpecName: "ceph") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.836187 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-kube-api-access-qxtxn" (OuterVolumeSpecName: "kube-api-access-qxtxn") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "kube-api-access-qxtxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.837514 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.862715 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.863407 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.868494 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-inventory" (OuterVolumeSpecName: "inventory") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.870872 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "09c619e9-dd51-4931-bb0f-1f8c11d9e55d" (UID: "09c619e9-dd51-4931-bb0f-1f8c11d9e55d"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931710 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931758 4812 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931780 4812 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931798 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931817 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxtxn\" (UniqueName: \"kubernetes.io/projected/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-kube-api-access-qxtxn\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931834 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:04 crc kubenswrapper[4812]: I1125 17:28:04.931852 4812 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09c619e9-dd51-4931-bb0f-1f8c11d9e55d-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.226229 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" event={"ID":"09c619e9-dd51-4931-bb0f-1f8c11d9e55d","Type":"ContainerDied","Data":"653a3b7b295f918b9008f259a680c97dd4d8b5ed33fb77b454e906fc6ed7ff46"} Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.226276 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="653a3b7b295f918b9008f259a680c97dd4d8b5ed33fb77b454e906fc6ed7ff46" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.226283 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-7r6sm" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.350412 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2"] Nov 25 17:28:05 crc kubenswrapper[4812]: E1125 17:28:05.350924 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09c619e9-dd51-4931-bb0f-1f8c11d9e55d" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.350951 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="09c619e9-dd51-4931-bb0f-1f8c11d9e55d" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.351167 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="09c619e9-dd51-4931-bb0f-1f8c11d9e55d" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.352982 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.356198 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.356400 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.356690 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.356914 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.357096 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.357295 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.364279 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2"] Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.543426 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz8cn\" (UniqueName: \"kubernetes.io/projected/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-kube-api-access-hz8cn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.543512 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.543549 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.543790 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.543990 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.544057 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.645372 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.645459 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz8cn\" (UniqueName: \"kubernetes.io/projected/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-kube-api-access-hz8cn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.645511 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.645556 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.645602 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-inventory\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.645656 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.649116 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.650306 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.650350 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.652356 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.653149 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.671560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz8cn\" (UniqueName: \"kubernetes.io/projected/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-kube-api-access-hz8cn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-58cb2\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.674414 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:28:05 crc kubenswrapper[4812]: I1125 17:28:05.844353 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:28:05 crc kubenswrapper[4812]: E1125 17:28:05.844898 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:28:06 crc kubenswrapper[4812]: I1125 17:28:06.254221 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2"] Nov 25 17:28:06 crc kubenswrapper[4812]: I1125 17:28:06.765692 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:28:07 crc kubenswrapper[4812]: I1125 17:28:07.242785 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" event={"ID":"5f52ab49-8a4a-40a2-899c-e0fdada4a23c","Type":"ContainerStarted","Data":"75bef4eb5177cc9144292dca9ff17cd13c25fd257816d13cf540a6f522c33b26"} Nov 25 17:28:07 crc kubenswrapper[4812]: I1125 17:28:07.242827 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" event={"ID":"5f52ab49-8a4a-40a2-899c-e0fdada4a23c","Type":"ContainerStarted","Data":"1daa61a390d884cfe059821b448349c2af85d1482dd7fc9268e973ded6588112"} Nov 25 17:28:07 crc kubenswrapper[4812]: I1125 17:28:07.261003 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" podStartSLOduration=1.750768922 podStartE2EDuration="2.260988696s" podCreationTimestamp="2025-11-25 17:28:05 +0000 UTC" firstStartedPulling="2025-11-25 17:28:06.25310479 +0000 UTC m=+2461.093246885" lastFinishedPulling="2025-11-25 17:28:06.763324564 +0000 UTC m=+2461.603466659" observedRunningTime="2025-11-25 17:28:07.258995062 +0000 UTC m=+2462.099137157" watchObservedRunningTime="2025-11-25 17:28:07.260988696 +0000 UTC m=+2462.101130791" Nov 25 17:28:19 crc kubenswrapper[4812]: I1125 17:28:19.831889 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:28:19 crc kubenswrapper[4812]: E1125 17:28:19.832829 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:28:30 crc kubenswrapper[4812]: I1125 17:28:30.832300 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:28:30 crc kubenswrapper[4812]: E1125 17:28:30.833298 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:28:45 crc kubenswrapper[4812]: I1125 17:28:45.832498 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:28:45 crc kubenswrapper[4812]: E1125 17:28:45.833550 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:28:59 crc kubenswrapper[4812]: I1125 17:28:59.831658 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:28:59 crc kubenswrapper[4812]: E1125 17:28:59.832519 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:29:13 crc kubenswrapper[4812]: I1125 17:29:13.833005 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:29:13 crc kubenswrapper[4812]: E1125 17:29:13.834199 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:29:25 crc kubenswrapper[4812]: I1125 17:29:25.841867 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:29:25 crc kubenswrapper[4812]: E1125 17:29:25.842802 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:29:40 crc kubenswrapper[4812]: I1125 17:29:40.832238 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:29:40 crc kubenswrapper[4812]: E1125 17:29:40.833159 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:29:54 crc kubenswrapper[4812]: I1125 17:29:54.832171 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:29:54 crc kubenswrapper[4812]: E1125 17:29:54.833022 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.153372 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph"] Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.155248 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.157494 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.158085 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.188738 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph"] Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.273602 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsk8k\" (UniqueName: \"kubernetes.io/projected/598db9ac-4bce-4d73-ae3e-a6f990b0924d-kube-api-access-vsk8k\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.273841 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/598db9ac-4bce-4d73-ae3e-a6f990b0924d-config-volume\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.273908 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/598db9ac-4bce-4d73-ae3e-a6f990b0924d-secret-volume\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.375764 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/598db9ac-4bce-4d73-ae3e-a6f990b0924d-config-volume\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 
crc kubenswrapper[4812]: I1125 17:30:00.376058 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/598db9ac-4bce-4d73-ae3e-a6f990b0924d-secret-volume\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.376106 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsk8k\" (UniqueName: \"kubernetes.io/projected/598db9ac-4bce-4d73-ae3e-a6f990b0924d-kube-api-access-vsk8k\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.376946 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/598db9ac-4bce-4d73-ae3e-a6f990b0924d-config-volume\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.385335 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/598db9ac-4bce-4d73-ae3e-a6f990b0924d-secret-volume\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.397189 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsk8k\" (UniqueName: \"kubernetes.io/projected/598db9ac-4bce-4d73-ae3e-a6f990b0924d-kube-api-access-vsk8k\") pod \"collect-profiles-29401530-jjtph\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:00 crc kubenswrapper[4812]: I1125 17:30:00.517972 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:01 crc kubenswrapper[4812]: I1125 17:30:01.025476 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph"] Nov 25 17:30:01 crc kubenswrapper[4812]: I1125 17:30:01.644050 4812 generic.go:334] "Generic (PLEG): container finished" podID="598db9ac-4bce-4d73-ae3e-a6f990b0924d" containerID="889305f125e52020047701a0e933bc56a973b573bb04fd53585398696d8d5e3e" exitCode=0 Nov 25 17:30:01 crc kubenswrapper[4812]: I1125 17:30:01.644188 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" event={"ID":"598db9ac-4bce-4d73-ae3e-a6f990b0924d","Type":"ContainerDied","Data":"889305f125e52020047701a0e933bc56a973b573bb04fd53585398696d8d5e3e"} Nov 25 17:30:01 crc kubenswrapper[4812]: I1125 17:30:01.644450 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" event={"ID":"598db9ac-4bce-4d73-ae3e-a6f990b0924d","Type":"ContainerStarted","Data":"8fc5210415d3e8e693d28c46d154083f1091ced90aabdb7ec4b4d5f3b04643e9"} Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.067721 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.227380 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsk8k\" (UniqueName: \"kubernetes.io/projected/598db9ac-4bce-4d73-ae3e-a6f990b0924d-kube-api-access-vsk8k\") pod \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.227434 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/598db9ac-4bce-4d73-ae3e-a6f990b0924d-config-volume\") pod \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.227482 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/598db9ac-4bce-4d73-ae3e-a6f990b0924d-secret-volume\") pod \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\" (UID: \"598db9ac-4bce-4d73-ae3e-a6f990b0924d\") " Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.228303 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/598db9ac-4bce-4d73-ae3e-a6f990b0924d-config-volume" (OuterVolumeSpecName: "config-volume") pod "598db9ac-4bce-4d73-ae3e-a6f990b0924d" (UID: "598db9ac-4bce-4d73-ae3e-a6f990b0924d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.233053 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/598db9ac-4bce-4d73-ae3e-a6f990b0924d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "598db9ac-4bce-4d73-ae3e-a6f990b0924d" (UID: "598db9ac-4bce-4d73-ae3e-a6f990b0924d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.235104 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/598db9ac-4bce-4d73-ae3e-a6f990b0924d-kube-api-access-vsk8k" (OuterVolumeSpecName: "kube-api-access-vsk8k") pod "598db9ac-4bce-4d73-ae3e-a6f990b0924d" (UID: "598db9ac-4bce-4d73-ae3e-a6f990b0924d"). InnerVolumeSpecName "kube-api-access-vsk8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.330060 4812 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/598db9ac-4bce-4d73-ae3e-a6f990b0924d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.330115 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsk8k\" (UniqueName: \"kubernetes.io/projected/598db9ac-4bce-4d73-ae3e-a6f990b0924d-kube-api-access-vsk8k\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.330133 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/598db9ac-4bce-4d73-ae3e-a6f990b0924d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.662943 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" event={"ID":"598db9ac-4bce-4d73-ae3e-a6f990b0924d","Type":"ContainerDied","Data":"8fc5210415d3e8e693d28c46d154083f1091ced90aabdb7ec4b4d5f3b04643e9"} Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.662998 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401530-jjtph" Nov 25 17:30:03 crc kubenswrapper[4812]: I1125 17:30:03.663034 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fc5210415d3e8e693d28c46d154083f1091ced90aabdb7ec4b4d5f3b04643e9" Nov 25 17:30:04 crc kubenswrapper[4812]: I1125 17:30:04.137976 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"] Nov 25 17:30:04 crc kubenswrapper[4812]: I1125 17:30:04.145780 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401485-brhb9"] Nov 25 17:30:05 crc kubenswrapper[4812]: I1125 17:30:05.844616 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7bb301b-f2a3-4526-b953-d6aa12d8621c" path="/var/lib/kubelet/pods/d7bb301b-f2a3-4526-b953-d6aa12d8621c/volumes" Nov 25 17:30:06 crc kubenswrapper[4812]: I1125 17:30:06.831054 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:30:06 crc kubenswrapper[4812]: E1125 17:30:06.831604 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:30:17 crc kubenswrapper[4812]: I1125 17:30:17.831863 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:30:17 crc kubenswrapper[4812]: E1125 17:30:17.832959 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:30:32 crc kubenswrapper[4812]: I1125 17:30:32.832046 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:30:33 crc kubenswrapper[4812]: I1125 17:30:33.964871 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"4663c5c86743f0128022e03f1f9d0812eba97990871d15cc9bec70b7e56a04db"} Nov 25 17:31:01 crc kubenswrapper[4812]: I1125 17:31:01.391310 4812 scope.go:117] "RemoveContainer" containerID="fa3b820d94a17c4fda80f9eb120eb5aec8cf4312f89c01e688ac8881128a851f" Nov 25 17:31:01 crc kubenswrapper[4812]: I1125 17:31:01.419696 4812 scope.go:117] "RemoveContainer" containerID="b3355e4fbfb64260b06c6668ec60fcef7510c0daa7f3154f6b4b5e193ebd7602" Nov 25 17:31:01 crc kubenswrapper[4812]: I1125 17:31:01.479922 4812 scope.go:117] "RemoveContainer" containerID="634fb7fe22c335c8c827de911ee81b85151ccca9cf83426dbeed6250432a3d1a" Nov 25 17:31:01 crc kubenswrapper[4812]: I1125 17:31:01.516020 4812 scope.go:117] "RemoveContainer" containerID="9b3ae785843d9b3c1948e001e21b5f014a555f6915d837777b8b0f98bcd56339" Nov 25 17:32:50 crc 
kubenswrapper[4812]: I1125 17:32:50.313641 4812 generic.go:334] "Generic (PLEG): container finished" podID="5f52ab49-8a4a-40a2-899c-e0fdada4a23c" containerID="75bef4eb5177cc9144292dca9ff17cd13c25fd257816d13cf540a6f522c33b26" exitCode=0 Nov 25 17:32:50 crc kubenswrapper[4812]: I1125 17:32:50.313756 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" event={"ID":"5f52ab49-8a4a-40a2-899c-e0fdada4a23c","Type":"ContainerDied","Data":"75bef4eb5177cc9144292dca9ff17cd13c25fd257816d13cf540a6f522c33b26"} Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.764235 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.889874 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz8cn\" (UniqueName: \"kubernetes.io/projected/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-kube-api-access-hz8cn\") pod \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.890038 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ceph\") pod \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.890163 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-combined-ca-bundle\") pod \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.890191 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-secret-0\") pod \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.890218 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ssh-key\") pod \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.890248 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-inventory\") pod \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\" (UID: \"5f52ab49-8a4a-40a2-899c-e0fdada4a23c\") " Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.895766 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-kube-api-access-hz8cn" (OuterVolumeSpecName: "kube-api-access-hz8cn") pod "5f52ab49-8a4a-40a2-899c-e0fdada4a23c" (UID: "5f52ab49-8a4a-40a2-899c-e0fdada4a23c"). InnerVolumeSpecName "kube-api-access-hz8cn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.896299 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "5f52ab49-8a4a-40a2-899c-e0fdada4a23c" (UID: "5f52ab49-8a4a-40a2-899c-e0fdada4a23c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.896913 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ceph" (OuterVolumeSpecName: "ceph") pod "5f52ab49-8a4a-40a2-899c-e0fdada4a23c" (UID: "5f52ab49-8a4a-40a2-899c-e0fdada4a23c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.920457 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f52ab49-8a4a-40a2-899c-e0fdada4a23c" (UID: "5f52ab49-8a4a-40a2-899c-e0fdada4a23c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.928757 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-inventory" (OuterVolumeSpecName: "inventory") pod "5f52ab49-8a4a-40a2-899c-e0fdada4a23c" (UID: "5f52ab49-8a4a-40a2-899c-e0fdada4a23c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.928870 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "5f52ab49-8a4a-40a2-899c-e0fdada4a23c" (UID: "5f52ab49-8a4a-40a2-899c-e0fdada4a23c"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.992309 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.992657 4812 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.992672 4812 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.992681 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.992689 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:32:51 crc kubenswrapper[4812]: I1125 17:32:51.992697 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz8cn\" (UniqueName: \"kubernetes.io/projected/5f52ab49-8a4a-40a2-899c-e0fdada4a23c-kube-api-access-hz8cn\") on node \"crc\" DevicePath \"\"" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.338446 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" event={"ID":"5f52ab49-8a4a-40a2-899c-e0fdada4a23c","Type":"ContainerDied","Data":"1daa61a390d884cfe059821b448349c2af85d1482dd7fc9268e973ded6588112"} Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.338818 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1daa61a390d884cfe059821b448349c2af85d1482dd7fc9268e973ded6588112" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.338564 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-58cb2" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.425841 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg"] Nov 25 17:32:52 crc kubenswrapper[4812]: E1125 17:32:52.426176 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="598db9ac-4bce-4d73-ae3e-a6f990b0924d" containerName="collect-profiles" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.426195 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="598db9ac-4bce-4d73-ae3e-a6f990b0924d" containerName="collect-profiles" Nov 25 17:32:52 crc kubenswrapper[4812]: E1125 17:32:52.426210 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f52ab49-8a4a-40a2-899c-e0fdada4a23c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.426220 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f52ab49-8a4a-40a2-899c-e0fdada4a23c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.426401 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="598db9ac-4bce-4d73-ae3e-a6f990b0924d" containerName="collect-profiles" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.426418 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f52ab49-8a4a-40a2-899c-e0fdada4a23c" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.426994 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.428856 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.429104 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.429318 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wtld4" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.429597 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.430776 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.431043 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.431258 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.431270 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.444217 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.445666 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg"] Nov 25 17:32:52 crc 
kubenswrapper[4812]: I1125 17:32:52.603564 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.604038 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.604159 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.604798 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.604925 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.605024 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.605154 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.605308 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.605436 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.605634 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmlbw\" (UniqueName: \"kubernetes.io/projected/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-kube-api-access-nmlbw\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.605799 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.708854 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.708987 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709084 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709143 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc 
kubenswrapper[4812]: I1125 17:32:52.709184 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709224 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709270 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709321 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709398 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmlbw\" (UniqueName: \"kubernetes.io/projected/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-kube-api-access-nmlbw\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709463 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.709614 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.710425 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " 
pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.710956 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.713908 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.713925 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.714702 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.714855 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.714990 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.715021 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.716560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" 
(UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.725242 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.740166 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmlbw\" (UniqueName: \"kubernetes.io/projected/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-kube-api-access-nmlbw\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:52 crc kubenswrapper[4812]: I1125 17:32:52.746085 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:32:53 crc kubenswrapper[4812]: I1125 17:32:53.337335 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg"] Nov 25 17:32:53 crc kubenswrapper[4812]: W1125 17:32:53.345634 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4e6d41e_fef0_4ef7_a3e7_96d5c5be146e.slice/crio-2eea05cd43b5a1f4262fdd661957a668062fe28210dc54c88efb13e18497f879 WatchSource:0}: Error finding container 2eea05cd43b5a1f4262fdd661957a668062fe28210dc54c88efb13e18497f879: Status 404 returned error can't find the container with id 2eea05cd43b5a1f4262fdd661957a668062fe28210dc54c88efb13e18497f879 Nov 25 17:32:53 crc kubenswrapper[4812]: I1125 17:32:53.350121 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:32:54 crc kubenswrapper[4812]: I1125 17:32:54.354551 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" event={"ID":"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e","Type":"ContainerStarted","Data":"5e0d9d2b2026458d26cb3ebf57b96bd1031397c38209c542c876b0f431f119de"} Nov 25 17:32:54 crc kubenswrapper[4812]: I1125 17:32:54.354825 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" event={"ID":"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e","Type":"ContainerStarted","Data":"2eea05cd43b5a1f4262fdd661957a668062fe28210dc54c88efb13e18497f879"} Nov 25 17:32:54 crc kubenswrapper[4812]: I1125 17:32:54.377703 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" podStartSLOduration=1.856786195 podStartE2EDuration="2.377682382s" podCreationTimestamp="2025-11-25 17:32:52 +0000 UTC" firstStartedPulling="2025-11-25 17:32:53.349881167 +0000 UTC m=+2748.190023262" lastFinishedPulling="2025-11-25 17:32:53.870777354 +0000 UTC m=+2748.710919449" observedRunningTime="2025-11-25 17:32:54.371891746 +0000 UTC m=+2749.212033841" watchObservedRunningTime="2025-11-25 17:32:54.377682382 +0000 UTC m=+2749.217824477" Nov 25 17:32:57 crc kubenswrapper[4812]: I1125 17:32:57.332756 4812 patch_prober.go:28] 
interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:32:57 crc kubenswrapper[4812]: I1125 17:32:57.333238 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:33:27 crc kubenswrapper[4812]: I1125 17:33:27.333400 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:33:27 crc kubenswrapper[4812]: I1125 17:33:27.334846 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.174795 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-89hcv"] Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.177760 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.187583 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-89hcv"] Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.328860 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frmlk\" (UniqueName: \"kubernetes.io/projected/72901cdb-a063-460f-be07-d7b04e7dcf33-kube-api-access-frmlk\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.329152 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-utilities\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.329197 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-catalog-content\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.431466 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frmlk\" (UniqueName: \"kubernetes.io/projected/72901cdb-a063-460f-be07-d7b04e7dcf33-kube-api-access-frmlk\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " 
pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.431922 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-utilities\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.432035 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-catalog-content\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.432327 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-utilities\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.432612 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-catalog-content\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.461612 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frmlk\" (UniqueName: \"kubernetes.io/projected/72901cdb-a063-460f-be07-d7b04e7dcf33-kube-api-access-frmlk\") pod \"redhat-marketplace-89hcv\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.499038 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:36 crc kubenswrapper[4812]: W1125 17:33:36.975515 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72901cdb_a063_460f_be07_d7b04e7dcf33.slice/crio-7548246687c231300dc2ba04bfdd8eecc28aeea00431724f7be063f650c7500c WatchSource:0}: Error finding container 7548246687c231300dc2ba04bfdd8eecc28aeea00431724f7be063f650c7500c: Status 404 returned error can't find the container with id 7548246687c231300dc2ba04bfdd8eecc28aeea00431724f7be063f650c7500c Nov 25 17:33:36 crc kubenswrapper[4812]: I1125 17:33:36.975960 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-89hcv"] Nov 25 17:33:37 crc kubenswrapper[4812]: I1125 17:33:37.803353 4812 generic.go:334] "Generic (PLEG): container finished" podID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerID="f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d" exitCode=0 Nov 25 17:33:37 crc kubenswrapper[4812]: I1125 17:33:37.803402 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-89hcv" event={"ID":"72901cdb-a063-460f-be07-d7b04e7dcf33","Type":"ContainerDied","Data":"f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d"} Nov 25 17:33:37 crc kubenswrapper[4812]: I1125 17:33:37.803449 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-89hcv" event={"ID":"72901cdb-a063-460f-be07-d7b04e7dcf33","Type":"ContainerStarted","Data":"7548246687c231300dc2ba04bfdd8eecc28aeea00431724f7be063f650c7500c"} Nov 25 17:33:38 crc kubenswrapper[4812]: I1125 17:33:38.812398 4812 generic.go:334] "Generic (PLEG): container finished" podID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerID="8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282" exitCode=0 Nov 25 17:33:38 crc kubenswrapper[4812]: I1125 17:33:38.812605 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-89hcv" event={"ID":"72901cdb-a063-460f-be07-d7b04e7dcf33","Type":"ContainerDied","Data":"8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282"} Nov 25 17:33:39 crc kubenswrapper[4812]: I1125 17:33:39.822564 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-89hcv" event={"ID":"72901cdb-a063-460f-be07-d7b04e7dcf33","Type":"ContainerStarted","Data":"b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba"} Nov 25 17:33:39 crc kubenswrapper[4812]: I1125 17:33:39.847957 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-89hcv" podStartSLOduration=2.46938917 podStartE2EDuration="3.847938102s" podCreationTimestamp="2025-11-25 17:33:36 +0000 UTC" firstStartedPulling="2025-11-25 17:33:37.805025315 +0000 UTC m=+2792.645167420" lastFinishedPulling="2025-11-25 17:33:39.183574257 +0000 UTC m=+2794.023716352" observedRunningTime="2025-11-25 17:33:39.840552272 +0000 UTC m=+2794.680694377" watchObservedRunningTime="2025-11-25 17:33:39.847938102 +0000 UTC m=+2794.688080197" Nov 25 17:33:46 crc kubenswrapper[4812]: I1125 17:33:46.499265 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:46 crc kubenswrapper[4812]: I1125 17:33:46.499892 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:46 crc kubenswrapper[4812]: I1125 17:33:46.620624 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:46 crc kubenswrapper[4812]: I1125 17:33:46.929170 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:46 crc kubenswrapper[4812]: I1125 17:33:46.982783 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-89hcv"] Nov 25 17:33:48 crc kubenswrapper[4812]: I1125 17:33:48.904780 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-89hcv" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="registry-server" containerID="cri-o://b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba" gracePeriod=2 Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.398492 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.491159 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frmlk\" (UniqueName: \"kubernetes.io/projected/72901cdb-a063-460f-be07-d7b04e7dcf33-kube-api-access-frmlk\") pod \"72901cdb-a063-460f-be07-d7b04e7dcf33\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.491246 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-utilities\") pod \"72901cdb-a063-460f-be07-d7b04e7dcf33\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.491370 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-catalog-content\") pod \"72901cdb-a063-460f-be07-d7b04e7dcf33\" (UID: \"72901cdb-a063-460f-be07-d7b04e7dcf33\") " Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.492401 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-utilities" (OuterVolumeSpecName: "utilities") pod "72901cdb-a063-460f-be07-d7b04e7dcf33" (UID: "72901cdb-a063-460f-be07-d7b04e7dcf33"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.499091 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72901cdb-a063-460f-be07-d7b04e7dcf33-kube-api-access-frmlk" (OuterVolumeSpecName: "kube-api-access-frmlk") pod "72901cdb-a063-460f-be07-d7b04e7dcf33" (UID: "72901cdb-a063-460f-be07-d7b04e7dcf33"). InnerVolumeSpecName "kube-api-access-frmlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.526461 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72901cdb-a063-460f-be07-d7b04e7dcf33" (UID: "72901cdb-a063-460f-be07-d7b04e7dcf33"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.593427 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frmlk\" (UniqueName: \"kubernetes.io/projected/72901cdb-a063-460f-be07-d7b04e7dcf33-kube-api-access-frmlk\") on node \"crc\" DevicePath \"\"" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.593465 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.593475 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72901cdb-a063-460f-be07-d7b04e7dcf33-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.918414 4812 generic.go:334] "Generic (PLEG): container finished" podID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerID="b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba" exitCode=0 Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.918466 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-89hcv" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.918485 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-89hcv" event={"ID":"72901cdb-a063-460f-be07-d7b04e7dcf33","Type":"ContainerDied","Data":"b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba"} Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.918909 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-89hcv" event={"ID":"72901cdb-a063-460f-be07-d7b04e7dcf33","Type":"ContainerDied","Data":"7548246687c231300dc2ba04bfdd8eecc28aeea00431724f7be063f650c7500c"} Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.918954 4812 scope.go:117] "RemoveContainer" containerID="b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.943200 4812 scope.go:117] "RemoveContainer" containerID="8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.956513 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-89hcv"] Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.963852 4812 scope.go:117] "RemoveContainer" containerID="f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d" Nov 25 17:33:49 crc kubenswrapper[4812]: I1125 17:33:49.968208 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-89hcv"] Nov 25 17:33:50 crc kubenswrapper[4812]: I1125 17:33:50.008106 4812 scope.go:117] "RemoveContainer" containerID="b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba" Nov 25 17:33:50 crc kubenswrapper[4812]: E1125 17:33:50.008493 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba\": container with ID starting with b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba not found: ID does not exist" containerID="b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba" Nov 25 17:33:50 crc kubenswrapper[4812]: I1125 17:33:50.008519 4812 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba"} err="failed to get container status \"b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba\": rpc error: code = NotFound desc = could not find container \"b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba\": container with ID starting with b7e0a17db2f1b206976b099b764ffe6f3547177d9e232377b9848b82cb1072ba not found: ID does not exist" Nov 25 17:33:50 crc kubenswrapper[4812]: I1125 17:33:50.008556 4812 scope.go:117] "RemoveContainer" containerID="8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282" Nov 25 17:33:50 crc kubenswrapper[4812]: E1125 17:33:50.008841 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282\": container with ID starting with 8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282 not found: ID does not exist" containerID="8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282" Nov 25 17:33:50 crc kubenswrapper[4812]: I1125 17:33:50.008891 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282"} err="failed to get container status \"8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282\": rpc error: code = NotFound desc = could not find container \"8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282\": container with ID starting with 8c617f8999d3032d3b8e04fb0fde653532d7e34c808a6f9b32471d4e1221d282 not found: ID does not exist" Nov 25 17:33:50 crc kubenswrapper[4812]: I1125 17:33:50.008925 4812 scope.go:117] "RemoveContainer" containerID="f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d" Nov 25 17:33:50 crc kubenswrapper[4812]: E1125 17:33:50.009180 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d\": container with ID starting with f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d not found: ID does not exist" containerID="f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d" Nov 25 17:33:50 crc kubenswrapper[4812]: I1125 17:33:50.009211 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d"} err="failed to get container status \"f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d\": rpc error: code = NotFound desc = could not find container \"f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d\": container with ID starting with f468c98f50bcf88c592681586d0c04a434a8ffcc418b4717c100f3daa37e305d not found: ID does not exist" Nov 25 17:33:51 crc kubenswrapper[4812]: I1125 17:33:51.843202 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" path="/var/lib/kubelet/pods/72901cdb-a063-460f-be07-d7b04e7dcf33/volumes" Nov 25 17:33:57 crc kubenswrapper[4812]: I1125 17:33:57.333000 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:33:57 crc kubenswrapper[4812]: I1125 17:33:57.333494 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:33:57 crc kubenswrapper[4812]: I1125 17:33:57.333554 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:33:57 crc kubenswrapper[4812]: I1125 17:33:57.334220 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4663c5c86743f0128022e03f1f9d0812eba97990871d15cc9bec70b7e56a04db"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:33:57 crc kubenswrapper[4812]: I1125 17:33:57.334269 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://4663c5c86743f0128022e03f1f9d0812eba97990871d15cc9bec70b7e56a04db" gracePeriod=600 Nov 25 17:33:58 crc kubenswrapper[4812]: I1125 17:33:58.003292 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="4663c5c86743f0128022e03f1f9d0812eba97990871d15cc9bec70b7e56a04db" exitCode=0 Nov 25 17:33:58 crc kubenswrapper[4812]: I1125 17:33:58.003471 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"4663c5c86743f0128022e03f1f9d0812eba97990871d15cc9bec70b7e56a04db"} Nov 25 17:33:58 crc kubenswrapper[4812]: I1125 17:33:58.004037 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd"} Nov 25 17:33:58 crc kubenswrapper[4812]: I1125 17:33:58.004075 4812 scope.go:117] "RemoveContainer" containerID="96bbc51328ecd8c911234471c365bd30cd270eb8ef7d45da0893e8889cdaf1af" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.484021 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cb6sj"] Nov 25 17:34:09 crc kubenswrapper[4812]: E1125 17:34:09.484944 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="registry-server" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.484959 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="registry-server" Nov 25 17:34:09 crc kubenswrapper[4812]: E1125 17:34:09.484989 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="extract-utilities" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.484998 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" 
containerName="extract-utilities" Nov 25 17:34:09 crc kubenswrapper[4812]: E1125 17:34:09.485014 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="extract-content" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.485021 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="extract-content" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.485216 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="72901cdb-a063-460f-be07-d7b04e7dcf33" containerName="registry-server" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.487229 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.497722 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cb6sj"] Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.513331 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-catalog-content\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.513390 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfjlq\" (UniqueName: \"kubernetes.io/projected/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-kube-api-access-pfjlq\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.513470 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-utilities\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.615176 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-catalog-content\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.615234 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfjlq\" (UniqueName: \"kubernetes.io/projected/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-kube-api-access-pfjlq\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.615308 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-utilities\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.615700 4812 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-catalog-content\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.615751 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-utilities\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.634043 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfjlq\" (UniqueName: \"kubernetes.io/projected/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-kube-api-access-pfjlq\") pod \"certified-operators-cb6sj\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:09 crc kubenswrapper[4812]: I1125 17:34:09.807045 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:10 crc kubenswrapper[4812]: I1125 17:34:10.357192 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cb6sj"] Nov 25 17:34:11 crc kubenswrapper[4812]: I1125 17:34:11.170375 4812 generic.go:334] "Generic (PLEG): container finished" podID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerID="f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54" exitCode=0 Nov 25 17:34:11 crc kubenswrapper[4812]: I1125 17:34:11.170484 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerDied","Data":"f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54"} Nov 25 17:34:11 crc kubenswrapper[4812]: I1125 17:34:11.170750 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerStarted","Data":"270ed40296065e727ad8208654b9aa90cff02eaaa1e992f42624e87dccb5b8a4"} Nov 25 17:34:12 crc kubenswrapper[4812]: I1125 17:34:12.180073 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerStarted","Data":"8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5"} Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.194309 4812 generic.go:334] "Generic (PLEG): container finished" podID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerID="8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5" exitCode=0 Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.194356 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerDied","Data":"8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5"} Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.872486 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-65l9b"] Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.874500 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.886205 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-65l9b"] Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.943133 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjk8t\" (UniqueName: \"kubernetes.io/projected/95c1bc67-f071-46a9-aff5-d8ba20766b8f-kube-api-access-zjk8t\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.943201 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-catalog-content\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:13 crc kubenswrapper[4812]: I1125 17:34:13.943259 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-utilities\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.044377 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjk8t\" (UniqueName: \"kubernetes.io/projected/95c1bc67-f071-46a9-aff5-d8ba20766b8f-kube-api-access-zjk8t\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.044446 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-catalog-content\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.044501 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-utilities\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.045081 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-catalog-content\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.045354 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-utilities\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.084642 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zjk8t\" (UniqueName: \"kubernetes.io/projected/95c1bc67-f071-46a9-aff5-d8ba20766b8f-kube-api-access-zjk8t\") pod \"redhat-operators-65l9b\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.204576 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.218300 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerStarted","Data":"9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a"} Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.249316 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cb6sj" podStartSLOduration=2.813461302 podStartE2EDuration="5.249294726s" podCreationTimestamp="2025-11-25 17:34:09 +0000 UTC" firstStartedPulling="2025-11-25 17:34:11.172949402 +0000 UTC m=+2826.013091497" lastFinishedPulling="2025-11-25 17:34:13.608782815 +0000 UTC m=+2828.448924921" observedRunningTime="2025-11-25 17:34:14.237339535 +0000 UTC m=+2829.077481640" watchObservedRunningTime="2025-11-25 17:34:14.249294726 +0000 UTC m=+2829.089436821" Nov 25 17:34:14 crc kubenswrapper[4812]: I1125 17:34:14.682893 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-65l9b"] Nov 25 17:34:14 crc kubenswrapper[4812]: W1125 17:34:14.684037 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95c1bc67_f071_46a9_aff5_d8ba20766b8f.slice/crio-da47adfe7a2e5d1dfdd3ef5817e88419e22fcfa0c0cb20dbddc5713aed613056 WatchSource:0}: Error finding container da47adfe7a2e5d1dfdd3ef5817e88419e22fcfa0c0cb20dbddc5713aed613056: Status 404 returned error can't find the container with id da47adfe7a2e5d1dfdd3ef5817e88419e22fcfa0c0cb20dbddc5713aed613056 Nov 25 17:34:15 crc kubenswrapper[4812]: I1125 17:34:15.228167 4812 generic.go:334] "Generic (PLEG): container finished" podID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerID="4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7" exitCode=0 Nov 25 17:34:15 crc kubenswrapper[4812]: I1125 17:34:15.228274 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerDied","Data":"4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7"} Nov 25 17:34:15 crc kubenswrapper[4812]: I1125 17:34:15.228491 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerStarted","Data":"da47adfe7a2e5d1dfdd3ef5817e88419e22fcfa0c0cb20dbddc5713aed613056"} Nov 25 17:34:16 crc kubenswrapper[4812]: I1125 17:34:16.239731 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerStarted","Data":"06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11"} Nov 25 17:34:17 crc kubenswrapper[4812]: I1125 17:34:17.252506 4812 generic.go:334] "Generic (PLEG): container finished" podID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" 
containerID="06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11" exitCode=0 Nov 25 17:34:17 crc kubenswrapper[4812]: I1125 17:34:17.252839 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerDied","Data":"06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11"} Nov 25 17:34:18 crc kubenswrapper[4812]: I1125 17:34:18.263794 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerStarted","Data":"5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543"} Nov 25 17:34:18 crc kubenswrapper[4812]: I1125 17:34:18.292086 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-65l9b" podStartSLOduration=2.851024338 podStartE2EDuration="5.292069061s" podCreationTimestamp="2025-11-25 17:34:13 +0000 UTC" firstStartedPulling="2025-11-25 17:34:15.22955364 +0000 UTC m=+2830.069695735" lastFinishedPulling="2025-11-25 17:34:17.670598363 +0000 UTC m=+2832.510740458" observedRunningTime="2025-11-25 17:34:18.281569777 +0000 UTC m=+2833.121711892" watchObservedRunningTime="2025-11-25 17:34:18.292069061 +0000 UTC m=+2833.132211156" Nov 25 17:34:19 crc kubenswrapper[4812]: I1125 17:34:19.807322 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:19 crc kubenswrapper[4812]: I1125 17:34:19.807731 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:19 crc kubenswrapper[4812]: I1125 17:34:19.846863 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:20 crc kubenswrapper[4812]: I1125 17:34:20.349117 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:21 crc kubenswrapper[4812]: I1125 17:34:21.072670 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cb6sj"] Nov 25 17:34:22 crc kubenswrapper[4812]: I1125 17:34:22.304738 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cb6sj" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="registry-server" containerID="cri-o://9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a" gracePeriod=2 Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.307819 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320060 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-catalog-content\") pod \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320107 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-utilities\") pod \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320142 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfjlq\" (UniqueName: \"kubernetes.io/projected/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-kube-api-access-pfjlq\") pod \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\" (UID: \"546a9f2c-d49f-4d3d-8832-7c5ffa446f69\") " Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320618 4812 generic.go:334] "Generic (PLEG): container finished" podID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerID="9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a" exitCode=0 Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320659 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerDied","Data":"9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a"} Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320691 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cb6sj" event={"ID":"546a9f2c-d49f-4d3d-8832-7c5ffa446f69","Type":"ContainerDied","Data":"270ed40296065e727ad8208654b9aa90cff02eaaa1e992f42624e87dccb5b8a4"} Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320711 4812 scope.go:117] "RemoveContainer" containerID="9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.320844 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cb6sj" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.321328 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-utilities" (OuterVolumeSpecName: "utilities") pod "546a9f2c-d49f-4d3d-8832-7c5ffa446f69" (UID: "546a9f2c-d49f-4d3d-8832-7c5ffa446f69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.330110 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-kube-api-access-pfjlq" (OuterVolumeSpecName: "kube-api-access-pfjlq") pod "546a9f2c-d49f-4d3d-8832-7c5ffa446f69" (UID: "546a9f2c-d49f-4d3d-8832-7c5ffa446f69"). InnerVolumeSpecName "kube-api-access-pfjlq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.379353 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "546a9f2c-d49f-4d3d-8832-7c5ffa446f69" (UID: "546a9f2c-d49f-4d3d-8832-7c5ffa446f69"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.400498 4812 scope.go:117] "RemoveContainer" containerID="8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.419206 4812 scope.go:117] "RemoveContainer" containerID="f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.422603 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.422635 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfjlq\" (UniqueName: \"kubernetes.io/projected/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-kube-api-access-pfjlq\") on node \"crc\" DevicePath \"\"" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.422649 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/546a9f2c-d49f-4d3d-8832-7c5ffa446f69-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.469558 4812 scope.go:117] "RemoveContainer" containerID="9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a" Nov 25 17:34:23 crc kubenswrapper[4812]: E1125 17:34:23.470047 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a\": container with ID starting with 9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a not found: ID does not exist" containerID="9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.470083 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a"} err="failed to get container status \"9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a\": rpc error: code = NotFound desc = could not find container \"9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a\": container with ID starting with 9d5eb778f27aec679a75ab5e8f28eb2a7622485f227f8d9c7af55d9289ecb22a not found: ID does not exist" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.470116 4812 scope.go:117] "RemoveContainer" containerID="8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5" Nov 25 17:34:23 crc kubenswrapper[4812]: E1125 17:34:23.470477 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5\": container with ID starting with 8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5 not found: ID does not exist" containerID="8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5" Nov 
25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.470503 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5"} err="failed to get container status \"8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5\": rpc error: code = NotFound desc = could not find container \"8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5\": container with ID starting with 8b963a61f269cdd53ffb73b510174530b381da5051b66cfabc5d06131c50f7b5 not found: ID does not exist" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.470520 4812 scope.go:117] "RemoveContainer" containerID="f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54" Nov 25 17:34:23 crc kubenswrapper[4812]: E1125 17:34:23.470993 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54\": container with ID starting with f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54 not found: ID does not exist" containerID="f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.471018 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54"} err="failed to get container status \"f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54\": rpc error: code = NotFound desc = could not find container \"f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54\": container with ID starting with f4a1e41452f8c515426421aca91c76d5dc8f546c74935a3b5d78307bf56d3f54 not found: ID does not exist" Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.671410 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cb6sj"] Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.681229 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cb6sj"] Nov 25 17:34:23 crc kubenswrapper[4812]: I1125 17:34:23.846621 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" path="/var/lib/kubelet/pods/546a9f2c-d49f-4d3d-8832-7c5ffa446f69/volumes" Nov 25 17:34:24 crc kubenswrapper[4812]: I1125 17:34:24.205117 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:24 crc kubenswrapper[4812]: I1125 17:34:24.205386 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:24 crc kubenswrapper[4812]: I1125 17:34:24.254156 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:24 crc kubenswrapper[4812]: I1125 17:34:24.384580 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:26 crc kubenswrapper[4812]: I1125 17:34:26.657863 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-65l9b"] Nov 25 17:34:26 crc kubenswrapper[4812]: I1125 17:34:26.658378 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-65l9b" 
podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="registry-server" containerID="cri-o://5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543" gracePeriod=2 Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.225215 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.341427 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjk8t\" (UniqueName: \"kubernetes.io/projected/95c1bc67-f071-46a9-aff5-d8ba20766b8f-kube-api-access-zjk8t\") pod \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.341510 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-catalog-content\") pod \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.341622 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-utilities\") pod \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\" (UID: \"95c1bc67-f071-46a9-aff5-d8ba20766b8f\") " Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.342787 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-utilities" (OuterVolumeSpecName: "utilities") pod "95c1bc67-f071-46a9-aff5-d8ba20766b8f" (UID: "95c1bc67-f071-46a9-aff5-d8ba20766b8f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.344600 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.347249 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95c1bc67-f071-46a9-aff5-d8ba20766b8f-kube-api-access-zjk8t" (OuterVolumeSpecName: "kube-api-access-zjk8t") pod "95c1bc67-f071-46a9-aff5-d8ba20766b8f" (UID: "95c1bc67-f071-46a9-aff5-d8ba20766b8f"). InnerVolumeSpecName "kube-api-access-zjk8t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.371781 4812 generic.go:334] "Generic (PLEG): container finished" podID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerID="5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543" exitCode=0 Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.371826 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerDied","Data":"5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543"} Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.371853 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65l9b" event={"ID":"95c1bc67-f071-46a9-aff5-d8ba20766b8f","Type":"ContainerDied","Data":"da47adfe7a2e5d1dfdd3ef5817e88419e22fcfa0c0cb20dbddc5713aed613056"} Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.371872 4812 scope.go:117] "RemoveContainer" containerID="5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.371867 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-65l9b" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.421911 4812 scope.go:117] "RemoveContainer" containerID="06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.443694 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95c1bc67-f071-46a9-aff5-d8ba20766b8f" (UID: "95c1bc67-f071-46a9-aff5-d8ba20766b8f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.445461 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjk8t\" (UniqueName: \"kubernetes.io/projected/95c1bc67-f071-46a9-aff5-d8ba20766b8f-kube-api-access-zjk8t\") on node \"crc\" DevicePath \"\"" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.445493 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95c1bc67-f071-46a9-aff5-d8ba20766b8f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.451058 4812 scope.go:117] "RemoveContainer" containerID="4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.496255 4812 scope.go:117] "RemoveContainer" containerID="5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543" Nov 25 17:34:28 crc kubenswrapper[4812]: E1125 17:34:28.496744 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543\": container with ID starting with 5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543 not found: ID does not exist" containerID="5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.496780 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543"} err="failed to get container status \"5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543\": rpc error: code = NotFound desc = could not find container \"5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543\": container with ID starting with 5b32e7804a2a69512942159fd62f9a2bbcaf6ee6f0320ab3e1d17f7dd67fb543 not found: ID does not exist" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.496804 4812 scope.go:117] "RemoveContainer" containerID="06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11" Nov 25 17:34:28 crc kubenswrapper[4812]: E1125 17:34:28.497212 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11\": container with ID starting with 06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11 not found: ID does not exist" containerID="06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.497244 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11"} err="failed to get container status \"06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11\": rpc error: code = NotFound desc = could not find container \"06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11\": container with ID starting with 06c84f3d3cebfecf2eda7a37afae25e7818e142e188b4e65d1f90ff576dd2b11 not found: ID does not exist" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.497264 4812 scope.go:117] "RemoveContainer" containerID="4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7" Nov 25 17:34:28 crc kubenswrapper[4812]: E1125 17:34:28.497506 4812 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7\": container with ID starting with 4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7 not found: ID does not exist" containerID="4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.497548 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7"} err="failed to get container status \"4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7\": rpc error: code = NotFound desc = could not find container \"4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7\": container with ID starting with 4fc1104e8ec1e4f3b92016d24396bf747a580f094b39cbf21d9c77f635b66fb7 not found: ID does not exist" Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.724513 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-65l9b"] Nov 25 17:34:28 crc kubenswrapper[4812]: I1125 17:34:28.734808 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-65l9b"] Nov 25 17:34:29 crc kubenswrapper[4812]: I1125 17:34:29.845863 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" path="/var/lib/kubelet/pods/95c1bc67-f071-46a9-aff5-d8ba20766b8f/volumes" Nov 25 17:35:57 crc kubenswrapper[4812]: I1125 17:35:57.332662 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:35:57 crc kubenswrapper[4812]: I1125 17:35:57.333593 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.778070 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bcx96"] Nov 25 17:36:01 crc kubenswrapper[4812]: E1125 17:36:01.779054 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="extract-utilities" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779068 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="extract-utilities" Nov 25 17:36:01 crc kubenswrapper[4812]: E1125 17:36:01.779081 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="registry-server" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779087 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="registry-server" Nov 25 17:36:01 crc kubenswrapper[4812]: E1125 17:36:01.779112 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="extract-content" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779118 4812 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="extract-content" Nov 25 17:36:01 crc kubenswrapper[4812]: E1125 17:36:01.779127 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="extract-utilities" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779133 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="extract-utilities" Nov 25 17:36:01 crc kubenswrapper[4812]: E1125 17:36:01.779150 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="registry-server" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779156 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="registry-server" Nov 25 17:36:01 crc kubenswrapper[4812]: E1125 17:36:01.779167 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="extract-content" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779173 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="extract-content" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779354 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="546a9f2c-d49f-4d3d-8832-7c5ffa446f69" containerName="registry-server" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.779372 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="95c1bc67-f071-46a9-aff5-d8ba20766b8f" containerName="registry-server" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.780620 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.790559 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bcx96"] Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.931642 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-utilities\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.932015 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-catalog-content\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:01 crc kubenswrapper[4812]: I1125 17:36:01.932226 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v69zb\" (UniqueName: \"kubernetes.io/projected/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-kube-api-access-v69zb\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.033885 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-utilities\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.034000 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-catalog-content\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.034043 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v69zb\" (UniqueName: \"kubernetes.io/projected/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-kube-api-access-v69zb\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.034694 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-utilities\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.034754 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-catalog-content\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.059153 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v69zb\" (UniqueName: \"kubernetes.io/projected/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-kube-api-access-v69zb\") pod \"community-operators-bcx96\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.101986 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:02 crc kubenswrapper[4812]: I1125 17:36:02.657869 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bcx96"] Nov 25 17:36:03 crc kubenswrapper[4812]: I1125 17:36:03.330048 4812 generic.go:334] "Generic (PLEG): container finished" podID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerID="96065346b83012419258f11568855c7a7ee439839c8f79e8da8bb4cb54b157b9" exitCode=0 Nov 25 17:36:03 crc kubenswrapper[4812]: I1125 17:36:03.330137 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerDied","Data":"96065346b83012419258f11568855c7a7ee439839c8f79e8da8bb4cb54b157b9"} Nov 25 17:36:03 crc kubenswrapper[4812]: I1125 17:36:03.330438 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerStarted","Data":"db952e3b96be182c973b78438d4acc1c53c4f9fc08cf29ef23a9fac4f7b38cbc"} Nov 25 17:36:04 crc kubenswrapper[4812]: I1125 17:36:04.341922 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerStarted","Data":"23c913185a17eebf9580c4ee7687ecddd2c6d66645ae880f06037142de62c206"} Nov 25 17:36:05 crc kubenswrapper[4812]: I1125 17:36:05.352497 4812 generic.go:334] "Generic (PLEG): container finished" podID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerID="23c913185a17eebf9580c4ee7687ecddd2c6d66645ae880f06037142de62c206" exitCode=0 Nov 25 17:36:05 crc kubenswrapper[4812]: I1125 17:36:05.352569 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerDied","Data":"23c913185a17eebf9580c4ee7687ecddd2c6d66645ae880f06037142de62c206"} Nov 25 17:36:06 crc kubenswrapper[4812]: I1125 17:36:06.372003 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerStarted","Data":"70be4f122a37fc8c4cdaeae5da9cce64aeb7baebc3c28493c700d8fc6218b5ff"} Nov 25 17:36:06 crc kubenswrapper[4812]: I1125 17:36:06.398048 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bcx96" podStartSLOduration=2.970906244 podStartE2EDuration="5.398023501s" podCreationTimestamp="2025-11-25 17:36:01 +0000 UTC" firstStartedPulling="2025-11-25 17:36:03.335052369 +0000 UTC m=+2938.175194464" lastFinishedPulling="2025-11-25 17:36:05.762169616 +0000 UTC m=+2940.602311721" observedRunningTime="2025-11-25 17:36:06.388639979 +0000 UTC m=+2941.228782144" watchObservedRunningTime="2025-11-25 17:36:06.398023501 +0000 UTC m=+2941.238165596" Nov 25 17:36:12 crc kubenswrapper[4812]: I1125 17:36:12.102975 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:12 crc kubenswrapper[4812]: I1125 17:36:12.103630 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:12 crc kubenswrapper[4812]: I1125 17:36:12.208893 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:12 crc kubenswrapper[4812]: I1125 17:36:12.480196 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:12 crc kubenswrapper[4812]: I1125 17:36:12.523497 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bcx96"] Nov 25 17:36:14 crc kubenswrapper[4812]: I1125 17:36:14.439006 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bcx96" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="registry-server" containerID="cri-o://70be4f122a37fc8c4cdaeae5da9cce64aeb7baebc3c28493c700d8fc6218b5ff" gracePeriod=2 Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.453003 4812 generic.go:334] "Generic (PLEG): container finished" podID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerID="70be4f122a37fc8c4cdaeae5da9cce64aeb7baebc3c28493c700d8fc6218b5ff" exitCode=0 Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.453356 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerDied","Data":"70be4f122a37fc8c4cdaeae5da9cce64aeb7baebc3c28493c700d8fc6218b5ff"} Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.544426 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.615421 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-utilities\") pod \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.616146 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v69zb\" (UniqueName: \"kubernetes.io/projected/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-kube-api-access-v69zb\") pod \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.616259 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-catalog-content\") pod \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\" (UID: \"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e\") " Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.616880 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-utilities" (OuterVolumeSpecName: "utilities") pod "8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" (UID: "8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.621764 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-kube-api-access-v69zb" (OuterVolumeSpecName: "kube-api-access-v69zb") pod "8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" (UID: "8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e"). InnerVolumeSpecName "kube-api-access-v69zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.669971 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" (UID: "8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.718631 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.718675 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v69zb\" (UniqueName: \"kubernetes.io/projected/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-kube-api-access-v69zb\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:15 crc kubenswrapper[4812]: I1125 17:36:15.718689 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.462265 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcx96" event={"ID":"8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e","Type":"ContainerDied","Data":"db952e3b96be182c973b78438d4acc1c53c4f9fc08cf29ef23a9fac4f7b38cbc"} Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.462308 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bcx96" Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.462330 4812 scope.go:117] "RemoveContainer" containerID="70be4f122a37fc8c4cdaeae5da9cce64aeb7baebc3c28493c700d8fc6218b5ff" Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.489776 4812 scope.go:117] "RemoveContainer" containerID="23c913185a17eebf9580c4ee7687ecddd2c6d66645ae880f06037142de62c206" Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.497492 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bcx96"] Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.510113 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bcx96"] Nov 25 17:36:16 crc kubenswrapper[4812]: I1125 17:36:16.541509 4812 scope.go:117] "RemoveContainer" containerID="96065346b83012419258f11568855c7a7ee439839c8f79e8da8bb4cb54b157b9" Nov 25 17:36:17 crc kubenswrapper[4812]: I1125 17:36:17.851620 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" path="/var/lib/kubelet/pods/8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e/volumes" Nov 25 17:36:18 crc kubenswrapper[4812]: I1125 17:36:18.486939 4812 generic.go:334] "Generic (PLEG): container finished" podID="d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" containerID="5e0d9d2b2026458d26cb3ebf57b96bd1031397c38209c542c876b0f431f119de" exitCode=0 Nov 25 17:36:18 crc kubenswrapper[4812]: I1125 17:36:18.486998 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" event={"ID":"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e","Type":"ContainerDied","Data":"5e0d9d2b2026458d26cb3ebf57b96bd1031397c38209c542c876b0f431f119de"} Nov 25 17:36:19 crc kubenswrapper[4812]: I1125 17:36:19.990900 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124198 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-inventory\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124273 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124315 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-0\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124342 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ssh-key\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124369 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-extra-config-0\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124417 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-0\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124451 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmlbw\" (UniqueName: \"kubernetes.io/projected/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-kube-api-access-nmlbw\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124520 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-custom-ceph-combined-ca-bundle\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124624 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-1\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124705 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: 
\"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph-nova-0\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.124770 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-1\") pod \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\" (UID: \"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e\") " Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.137991 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph" (OuterVolumeSpecName: "ceph") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.138135 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.151314 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-kube-api-access-nmlbw" (OuterVolumeSpecName: "kube-api-access-nmlbw") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "kube-api-access-nmlbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.157646 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.158763 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.160037 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.163724 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.170509 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-inventory" (OuterVolumeSpecName: "inventory") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.178847 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "ceph-nova-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.179920 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.181423 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" (UID: "d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227436 4812 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227475 4812 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227489 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmlbw\" (UniqueName: \"kubernetes.io/projected/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-kube-api-access-nmlbw\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227501 4812 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227513 4812 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227526 4812 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227553 4812 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227566 4812 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227577 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227587 4812 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.227598 4812 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.504926 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" event={"ID":"d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e","Type":"ContainerDied","Data":"2eea05cd43b5a1f4262fdd661957a668062fe28210dc54c88efb13e18497f879"} Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.504969 4812 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="2eea05cd43b5a1f4262fdd661957a668062fe28210dc54c88efb13e18497f879" Nov 25 17:36:20 crc kubenswrapper[4812]: I1125 17:36:20.505508 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-pt6fg" Nov 25 17:36:27 crc kubenswrapper[4812]: I1125 17:36:27.332389 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:36:27 crc kubenswrapper[4812]: I1125 17:36:27.333001 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.124545 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: E1125 17:36:36.125473 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="extract-content" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.125489 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="extract-content" Nov 25 17:36:36 crc kubenswrapper[4812]: E1125 17:36:36.125516 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="extract-utilities" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.125524 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="extract-utilities" Nov 25 17:36:36 crc kubenswrapper[4812]: E1125 17:36:36.125551 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="registry-server" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.125559 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="registry-server" Nov 25 17:36:36 crc kubenswrapper[4812]: E1125 17:36:36.125582 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.125591 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.125797 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b0dd792-e2d4-49f8-a80d-dbd0b3c4636e" containerName="registry-server" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.125816 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4e6d41e-fef0-4ef7-a3e7-96d5c5be146e" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.127204 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.129922 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.130216 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.147216 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.222930 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-sys\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.222982 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-dev\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223005 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223028 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223230 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223295 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223336 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223382 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-scripts\") pod 
\"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223447 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223503 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aeb24c53-3056-44ac-bf12-e203596d2f63-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223601 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223621 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223667 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223691 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223725 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjm8n\" (UniqueName: \"kubernetes.io/projected/aeb24c53-3056-44ac-bf12-e203596d2f63-kube-api-access-xjm8n\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.223759 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-run\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.245384 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.253590 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.255909 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.279517 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325226 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325277 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325305 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325335 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-nvme\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325365 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325387 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325410 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325431 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325460 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-xjm8n\" (UniqueName: \"kubernetes.io/projected/aeb24c53-3056-44ac-bf12-e203596d2f63-kube-api-access-xjm8n\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325487 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-run\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325513 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-sys\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325556 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-dev\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325581 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325608 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/69e56055-e6c8-4ec1-8c22-1bbfb185086f-ceph\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325640 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325677 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325701 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-dev\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325730 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-config-data\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" 
Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325763 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpb6m\" (UniqueName: \"kubernetes.io/projected/69e56055-e6c8-4ec1-8c22-1bbfb185086f-kube-api-access-lpb6m\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325788 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-config-data-custom\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325822 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325851 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325876 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-run\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325897 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325919 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325943 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-lib-modules\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325964 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-sys\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.325987 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.326014 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-scripts\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.326048 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.326085 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aeb24c53-3056-44ac-bf12-e203596d2f63-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.326131 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.329921 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.329950 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.329988 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-dev\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.329994 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.329999 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-run\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.330018 
4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-sys\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.330049 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.330010 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.330161 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.330235 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/aeb24c53-3056-44ac-bf12-e203596d2f63-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.333195 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.333577 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.334118 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/aeb24c53-3056-44ac-bf12-e203596d2f63-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.336885 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.338597 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aeb24c53-3056-44ac-bf12-e203596d2f63-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " 
pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.351236 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjm8n\" (UniqueName: \"kubernetes.io/projected/aeb24c53-3056-44ac-bf12-e203596d2f63-kube-api-access-xjm8n\") pod \"cinder-volume-volume1-0\" (UID: \"aeb24c53-3056-44ac-bf12-e203596d2f63\") " pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.427577 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/69e56055-e6c8-4ec1-8c22-1bbfb185086f-ceph\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428235 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428332 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-dev\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428419 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-config-data\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428501 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpb6m\" (UniqueName: \"kubernetes.io/projected/69e56055-e6c8-4ec1-8c22-1bbfb185086f-kube-api-access-lpb6m\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428605 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-config-data-custom\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428713 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-run\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428776 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-run\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428438 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-dev\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " 
pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428375 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.428788 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429020 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-sys\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429061 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-sys\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429098 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-lib-modules\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429235 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-scripts\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429303 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-lib-modules\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429400 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429480 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429574 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-nvme\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " 
pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429659 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429715 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-nvme\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429525 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429656 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429776 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429734 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.429907 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/69e56055-e6c8-4ec1-8c22-1bbfb185086f-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.431698 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-config-data-custom\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.431959 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/69e56055-e6c8-4ec1-8c22-1bbfb185086f-ceph\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.432214 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-config-data\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 
17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.433065 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-scripts\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.433259 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69e56055-e6c8-4ec1-8c22-1bbfb185086f-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.447176 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpb6m\" (UniqueName: \"kubernetes.io/projected/69e56055-e6c8-4ec1-8c22-1bbfb185086f-kube-api-access-lpb6m\") pod \"cinder-backup-0\" (UID: \"69e56055-e6c8-4ec1-8c22-1bbfb185086f\") " pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.448848 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.579781 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.644924 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-r4w2f"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.646767 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.661653 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-r4w2f"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.668402 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6748cb846f-77xjv"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.673980 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.678784 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.678844 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.678949 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.678989 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-pvp24" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.694679 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6748cb846f-77xjv"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.735380 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsmxp\" (UniqueName: \"kubernetes.io/projected/2e556e3f-d3af-47df-9561-e93c12e281d1-kube-api-access-wsmxp\") pod \"manila-db-create-r4w2f\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.735583 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e556e3f-d3af-47df-9561-e93c12e281d1-operator-scripts\") pod \"manila-db-create-r4w2f\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.770573 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-079a-account-create-xdmnx"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.771645 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.775752 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.796759 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-079a-account-create-xdmnx"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.808022 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.809486 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.811586 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.811755 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.811854 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.812064 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-9djh4" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842029 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e556e3f-d3af-47df-9561-e93c12e281d1-operator-scripts\") pod \"manila-db-create-r4w2f\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842068 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-config-data\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842139 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-logs\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842185 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-horizon-secret-key\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842334 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6zzl\" (UniqueName: \"kubernetes.io/projected/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-kube-api-access-j6zzl\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842373 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-scripts\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.842430 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsmxp\" (UniqueName: \"kubernetes.io/projected/2e556e3f-d3af-47df-9561-e93c12e281d1-kube-api-access-wsmxp\") pod \"manila-db-create-r4w2f\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: 
I1125 17:36:36.843020 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e556e3f-d3af-47df-9561-e93c12e281d1-operator-scripts\") pod \"manila-db-create-r4w2f\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.872274 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsmxp\" (UniqueName: \"kubernetes.io/projected/2e556e3f-d3af-47df-9561-e93c12e281d1-kube-api-access-wsmxp\") pod \"manila-db-create-r4w2f\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.879586 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.901583 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-76d45dc6b9-tlnnp"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.903150 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.912803 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-76d45dc6b9-tlnnp"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.928888 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.931359 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.935837 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.935867 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.944469 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-config-data\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.944895 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-config-data\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.944949 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-logs\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.944966 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.944992 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-horizon-secret-key\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945010 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945034 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-logs\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945086 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9274\" (UniqueName: \"kubernetes.io/projected/9d15ab92-82ab-47f1-b377-eec15c9c7b99-kube-api-access-k9274\") pod \"manila-079a-account-create-xdmnx\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945106 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d15ab92-82ab-47f1-b377-eec15c9c7b99-operator-scripts\") pod \"manila-079a-account-create-xdmnx\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945125 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sdgg\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-kube-api-access-5sdgg\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945149 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6zzl\" (UniqueName: \"kubernetes.io/projected/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-kube-api-access-j6zzl\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945170 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-scripts\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945188 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945208 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-ceph\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.945303 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.947059 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-logs\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.947240 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-config-data\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.953762 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-scripts\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.956120 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-horizon-secret-key\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:36 crc kubenswrapper[4812]: I1125 17:36:36.956167 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.003119 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6zzl\" (UniqueName: \"kubernetes.io/projected/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-kube-api-access-j6zzl\") pod \"horizon-6748cb846f-77xjv\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047553 4812 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-config-data\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047601 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd278622-9317-44c5-a14c-07df7a2bfab0-logs\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047620 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-scripts\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047666 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-scripts\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047687 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-config-data\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047715 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047740 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-logs\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047757 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-config-data\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047789 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbmk4\" (UniqueName: \"kubernetes.io/projected/cd278622-9317-44c5-a14c-07df7a2bfab0-kube-api-access-bbmk4\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047812 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-scripts\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047831 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9274\" (UniqueName: \"kubernetes.io/projected/9d15ab92-82ab-47f1-b377-eec15c9c7b99-kube-api-access-k9274\") pod \"manila-079a-account-create-xdmnx\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047851 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d15ab92-82ab-47f1-b377-eec15c9c7b99-operator-scripts\") pod \"manila-079a-account-create-xdmnx\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047871 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sdgg\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-kube-api-access-5sdgg\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047894 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54zw4\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-kube-api-access-54zw4\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047921 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cd278622-9317-44c5-a14c-07df7a2bfab0-horizon-secret-key\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047941 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047959 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047976 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-ceph\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.047998 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.048015 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-logs\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.048034 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.048050 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-ceph\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.048070 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.048087 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.048109 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.058003 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.058433 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-logs\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.058482 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.059668 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.061450 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d15ab92-82ab-47f1-b377-eec15c9c7b99-operator-scripts\") pod \"manila-079a-account-create-xdmnx\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.064468 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-ceph\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.064678 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.064947 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-scripts\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.071157 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-config-data\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.088249 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.089116 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.128271 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9274\" (UniqueName: \"kubernetes.io/projected/9d15ab92-82ab-47f1-b377-eec15c9c7b99-kube-api-access-k9274\") pod \"manila-079a-account-create-xdmnx\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.135382 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sdgg\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-kube-api-access-5sdgg\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.135859 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.140628 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149711 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cd278622-9317-44c5-a14c-07df7a2bfab0-horizon-secret-key\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149762 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149794 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-logs\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149813 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149829 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-ceph\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149853 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod 
\"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149872 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149907 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd278622-9317-44c5-a14c-07df7a2bfab0-logs\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149922 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-scripts\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149963 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-config-data\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.149995 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-config-data\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.150024 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbmk4\" (UniqueName: \"kubernetes.io/projected/cd278622-9317-44c5-a14c-07df7a2bfab0-kube-api-access-bbmk4\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.150044 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-scripts\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.150071 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54zw4\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-kube-api-access-54zw4\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.151991 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd278622-9317-44c5-a14c-07df7a2bfab0-logs\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc 
kubenswrapper[4812]: I1125 17:36:37.152057 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.152160 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.152583 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-logs\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.153322 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-scripts\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.154018 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.154418 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.156579 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cd278622-9317-44c5-a14c-07df7a2bfab0-horizon-secret-key\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.158555 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-scripts\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.158921 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-ceph\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.173187 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-config-data\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.177445 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.178082 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbmk4\" (UniqueName: \"kubernetes.io/projected/cd278622-9317-44c5-a14c-07df7a2bfab0-kube-api-access-bbmk4\") pod \"horizon-76d45dc6b9-tlnnp\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.178174 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.181782 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-config-data\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.190930 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54zw4\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-kube-api-access-54zw4\") pod \"glance-default-external-api-0\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.237238 4812 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.281029 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.285152 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.554992 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.707320 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"aeb24c53-3056-44ac-bf12-e203596d2f63","Type":"ContainerStarted","Data":"925659dfdad8b2251f94334320c4074c41e22220e79213bf4c3d4b8ea637f9ce"} Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.708691 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"69e56055-e6c8-4ec1-8c22-1bbfb185086f","Type":"ContainerStarted","Data":"288753f95c2fd8604041bbc70314dc9ec8a4825ae48c83793b7bb900d1183c56"} Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.923516 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-079a-account-create-xdmnx"] Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.934871 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6748cb846f-77xjv"] Nov 25 17:36:37 crc kubenswrapper[4812]: I1125 17:36:37.946122 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-r4w2f"] Nov 25 17:36:37 crc kubenswrapper[4812]: W1125 17:36:37.986146 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2bbc08d_477f_4dfb_82ba_1d9b1fd90f9e.slice/crio-d6cdb4bdbff5e3bd8c18f8120383b078dbf46dec9f6183d4bf48477ddcd1ad90 WatchSource:0}: Error finding container d6cdb4bdbff5e3bd8c18f8120383b078dbf46dec9f6183d4bf48477ddcd1ad90: Status 404 returned error can't find the container with id d6cdb4bdbff5e3bd8c18f8120383b078dbf46dec9f6183d4bf48477ddcd1ad90 Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.031741 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-76d45dc6b9-tlnnp"] Nov 25 17:36:38 crc kubenswrapper[4812]: W1125 17:36:38.033380 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd278622_9317_44c5_a14c_07df7a2bfab0.slice/crio-14616e716f6d4939c85a64020782057e72943069ed3e793b7993bf1b57beddf0 WatchSource:0}: Error finding container 14616e716f6d4939c85a64020782057e72943069ed3e793b7993bf1b57beddf0: Status 404 returned error can't find the container with id 14616e716f6d4939c85a64020782057e72943069ed3e793b7993bf1b57beddf0 Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.062707 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.133518 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:38 crc kubenswrapper[4812]: W1125 17:36:38.282007 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded9db7cc_9bf5_4463_9af7_976ea1bc9370.slice/crio-1e4e2b7045801a21ebd1007b2dd326ddb71e3e0aa32459e727eecb827a660ce7 
WatchSource:0}: Error finding container 1e4e2b7045801a21ebd1007b2dd326ddb71e3e0aa32459e727eecb827a660ce7: Status 404 returned error can't find the container with id 1e4e2b7045801a21ebd1007b2dd326ddb71e3e0aa32459e727eecb827a660ce7 Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.718041 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"935d1166-b0ca-4ffe-836a-7ab7278ca919","Type":"ContainerStarted","Data":"4b3017fb879e883de1bcf299e9e21efa060076528a4a185b5d17d9cd181b51e1"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.720706 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-r4w2f" event={"ID":"2e556e3f-d3af-47df-9561-e93c12e281d1","Type":"ContainerStarted","Data":"a90acbac79902cdb52ef4fcea9ae5b11639ff143d7714083c5b50dadd7085c1b"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.720729 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-r4w2f" event={"ID":"2e556e3f-d3af-47df-9561-e93c12e281d1","Type":"ContainerStarted","Data":"5d0bfd4f4f428a225d3898907f8b950666fb1a01d7b1086373d89a072621152c"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.724826 4812 generic.go:334] "Generic (PLEG): container finished" podID="9d15ab92-82ab-47f1-b377-eec15c9c7b99" containerID="099dcb306ebd7860d265853c8073d64c57a696f223cc0e459a6acff1f77c255a" exitCode=0 Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.724898 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-079a-account-create-xdmnx" event={"ID":"9d15ab92-82ab-47f1-b377-eec15c9c7b99","Type":"ContainerDied","Data":"099dcb306ebd7860d265853c8073d64c57a696f223cc0e459a6acff1f77c255a"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.724921 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-079a-account-create-xdmnx" event={"ID":"9d15ab92-82ab-47f1-b377-eec15c9c7b99","Type":"ContainerStarted","Data":"3a404495794e0412f187b69cfa9e9d2e66dbd36a3b46342d570c69bebfcfb056"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.729037 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ed9db7cc-9bf5-4463-9af7-976ea1bc9370","Type":"ContainerStarted","Data":"1e4e2b7045801a21ebd1007b2dd326ddb71e3e0aa32459e727eecb827a660ce7"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.735670 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76d45dc6b9-tlnnp" event={"ID":"cd278622-9317-44c5-a14c-07df7a2bfab0","Type":"ContainerStarted","Data":"14616e716f6d4939c85a64020782057e72943069ed3e793b7993bf1b57beddf0"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.737693 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-create-r4w2f" podStartSLOduration=2.737672791 podStartE2EDuration="2.737672791s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:36:38.734306679 +0000 UTC m=+2973.574448774" watchObservedRunningTime="2025-11-25 17:36:38.737672791 +0000 UTC m=+2973.577814886" Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.738028 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6748cb846f-77xjv" 
event={"ID":"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e","Type":"ContainerStarted","Data":"d6cdb4bdbff5e3bd8c18f8120383b078dbf46dec9f6183d4bf48477ddcd1ad90"} Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.908288 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6748cb846f-77xjv"] Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.949891 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-56b9b89f86-mb9q5"] Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.952471 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.954884 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 17:36:38 crc kubenswrapper[4812]: I1125 17:36:38.967930 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.000707 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56b9b89f86-mb9q5"] Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.011547 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-76d45dc6b9-tlnnp"] Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021206 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-config-data\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021267 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-scripts\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021322 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-secret-key\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021341 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xqvv\" (UniqueName: \"kubernetes.io/projected/842381e2-aa1c-4a72-9db3-51bffd277741-kube-api-access-8xqvv\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021373 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-tls-certs\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021427 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-combined-ca-bundle\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.021466 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/842381e2-aa1c-4a72-9db3-51bffd277741-logs\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.061786 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-67cb765896-fxd9b"] Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.063562 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.072270 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.082329 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67cb765896-fxd9b"] Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.123687 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-config-data\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.123991 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-combined-ca-bundle\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124017 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b30abba9-0f34-44e1-97c5-6babb1192606-logs\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124048 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-scripts\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124098 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-secret-key\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124115 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xqvv\" (UniqueName: \"kubernetes.io/projected/842381e2-aa1c-4a72-9db3-51bffd277741-kube-api-access-8xqvv\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " 
pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124137 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-horizon-secret-key\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124170 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-tls-certs\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124214 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b30abba9-0f34-44e1-97c5-6babb1192606-scripts\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124232 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-horizon-tls-certs\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124256 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-combined-ca-bundle\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124277 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6fnt\" (UniqueName: \"kubernetes.io/projected/b30abba9-0f34-44e1-97c5-6babb1192606-kube-api-access-c6fnt\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124302 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b30abba9-0f34-44e1-97c5-6babb1192606-config-data\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124323 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/842381e2-aa1c-4a72-9db3-51bffd277741-logs\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.124682 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/842381e2-aa1c-4a72-9db3-51bffd277741-logs\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc 
kubenswrapper[4812]: I1125 17:36:39.125994 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-scripts\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.126600 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-config-data\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.129125 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-tls-certs\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.131357 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-secret-key\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.131512 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-combined-ca-bundle\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.143188 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xqvv\" (UniqueName: \"kubernetes.io/projected/842381e2-aa1c-4a72-9db3-51bffd277741-kube-api-access-8xqvv\") pod \"horizon-56b9b89f86-mb9q5\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227082 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-combined-ca-bundle\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227141 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b30abba9-0f34-44e1-97c5-6babb1192606-logs\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227226 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-horizon-secret-key\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227289 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/b30abba9-0f34-44e1-97c5-6babb1192606-scripts\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227310 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-horizon-tls-certs\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227348 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6fnt\" (UniqueName: \"kubernetes.io/projected/b30abba9-0f34-44e1-97c5-6babb1192606-kube-api-access-c6fnt\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.227384 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b30abba9-0f34-44e1-97c5-6babb1192606-config-data\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.236657 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b30abba9-0f34-44e1-97c5-6babb1192606-scripts\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.243667 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-combined-ca-bundle\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.244195 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-horizon-secret-key\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.244863 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b30abba9-0f34-44e1-97c5-6babb1192606-logs\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.245303 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b30abba9-0f34-44e1-97c5-6babb1192606-config-data\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.246752 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b30abba9-0f34-44e1-97c5-6babb1192606-horizon-tls-certs\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " 
pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.270220 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6fnt\" (UniqueName: \"kubernetes.io/projected/b30abba9-0f34-44e1-97c5-6babb1192606-kube-api-access-c6fnt\") pod \"horizon-67cb765896-fxd9b\" (UID: \"b30abba9-0f34-44e1-97c5-6babb1192606\") " pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.299051 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.550731 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.761168 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"935d1166-b0ca-4ffe-836a-7ab7278ca919","Type":"ContainerStarted","Data":"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c"} Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.765025 4812 generic.go:334] "Generic (PLEG): container finished" podID="2e556e3f-d3af-47df-9561-e93c12e281d1" containerID="a90acbac79902cdb52ef4fcea9ae5b11639ff143d7714083c5b50dadd7085c1b" exitCode=0 Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.765118 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-r4w2f" event={"ID":"2e556e3f-d3af-47df-9561-e93c12e281d1","Type":"ContainerDied","Data":"a90acbac79902cdb52ef4fcea9ae5b11639ff143d7714083c5b50dadd7085c1b"} Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.769401 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ed9db7cc-9bf5-4463-9af7-976ea1bc9370","Type":"ContainerStarted","Data":"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"} Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.785970 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"aeb24c53-3056-44ac-bf12-e203596d2f63","Type":"ContainerStarted","Data":"8cc30cfc845b4d4e97a78232eb80c0a6e45c317c3dec38a61aefe4c4cca1c4fc"} Nov 25 17:36:39 crc kubenswrapper[4812]: W1125 17:36:39.871636 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod842381e2_aa1c_4a72_9db3_51bffd277741.slice/crio-9f85af0737f18e61a05991fab55904ee941745c4b8b505b847a808d8fa0c5ca4 WatchSource:0}: Error finding container 9f85af0737f18e61a05991fab55904ee941745c4b8b505b847a808d8fa0c5ca4: Status 404 returned error can't find the container with id 9f85af0737f18e61a05991fab55904ee941745c4b8b505b847a808d8fa0c5ca4 Nov 25 17:36:39 crc kubenswrapper[4812]: I1125 17:36:39.894249 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56b9b89f86-mb9q5"] Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.148381 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.208688 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67cb765896-fxd9b"] Nov 25 17:36:40 crc kubenswrapper[4812]: W1125 17:36:40.216744 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb30abba9_0f34_44e1_97c5_6babb1192606.slice/crio-040a19116d33db4e3aa0db691ef4d6de65bfa5aaa5a364bd4f25ddfe333ae0ab WatchSource:0}: Error finding container 040a19116d33db4e3aa0db691ef4d6de65bfa5aaa5a364bd4f25ddfe333ae0ab: Status 404 returned error can't find the container with id 040a19116d33db4e3aa0db691ef4d6de65bfa5aaa5a364bd4f25ddfe333ae0ab Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.261209 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9274\" (UniqueName: \"kubernetes.io/projected/9d15ab92-82ab-47f1-b377-eec15c9c7b99-kube-api-access-k9274\") pod \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.261821 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d15ab92-82ab-47f1-b377-eec15c9c7b99-operator-scripts\") pod \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\" (UID: \"9d15ab92-82ab-47f1-b377-eec15c9c7b99\") " Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.263142 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d15ab92-82ab-47f1-b377-eec15c9c7b99-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9d15ab92-82ab-47f1-b377-eec15c9c7b99" (UID: "9d15ab92-82ab-47f1-b377-eec15c9c7b99"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.277823 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d15ab92-82ab-47f1-b377-eec15c9c7b99-kube-api-access-k9274" (OuterVolumeSpecName: "kube-api-access-k9274") pod "9d15ab92-82ab-47f1-b377-eec15c9c7b99" (UID: "9d15ab92-82ab-47f1-b377-eec15c9c7b99"). InnerVolumeSpecName "kube-api-access-k9274". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.374504 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d15ab92-82ab-47f1-b377-eec15c9c7b99-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.374560 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9274\" (UniqueName: \"kubernetes.io/projected/9d15ab92-82ab-47f1-b377-eec15c9c7b99-kube-api-access-k9274\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.799825 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-079a-account-create-xdmnx" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.799808 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-079a-account-create-xdmnx" event={"ID":"9d15ab92-82ab-47f1-b377-eec15c9c7b99","Type":"ContainerDied","Data":"3a404495794e0412f187b69cfa9e9d2e66dbd36a3b46342d570c69bebfcfb056"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.799960 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3a404495794e0412f187b69cfa9e9d2e66dbd36a3b46342d570c69bebfcfb056" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.807626 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ed9db7cc-9bf5-4463-9af7-976ea1bc9370","Type":"ContainerStarted","Data":"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.807878 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-log" containerID="cri-o://468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6" gracePeriod=30 Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.808902 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-httpd" containerID="cri-o://4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e" gracePeriod=30 Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.817641 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56b9b89f86-mb9q5" event={"ID":"842381e2-aa1c-4a72-9db3-51bffd277741","Type":"ContainerStarted","Data":"9f85af0737f18e61a05991fab55904ee941745c4b8b505b847a808d8fa0c5ca4"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.822017 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"aeb24c53-3056-44ac-bf12-e203596d2f63","Type":"ContainerStarted","Data":"1dec8e0f8cc988467d7dc7543670803659bc6dc7008491cbc8c72fab31e9b328"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.827407 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"69e56055-e6c8-4ec1-8c22-1bbfb185086f","Type":"ContainerStarted","Data":"46042b1f992bfa80a3452cab6efdce0063d0acc23c53010b58ed996456173871"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.827846 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"69e56055-e6c8-4ec1-8c22-1bbfb185086f","Type":"ContainerStarted","Data":"0fedb3e5939a3fae37268446b3441994f423e64a940a511993f7d9e66d0dd11e"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.831434 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67cb765896-fxd9b" event={"ID":"b30abba9-0f34-44e1-97c5-6babb1192606","Type":"ContainerStarted","Data":"040a19116d33db4e3aa0db691ef4d6de65bfa5aaa5a364bd4f25ddfe333ae0ab"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.838426 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.838410475 podStartE2EDuration="4.838410475s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:36:40.828768204 +0000 UTC m=+2975.668910309" watchObservedRunningTime="2025-11-25 17:36:40.838410475 +0000 UTC m=+2975.678552570" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.846031 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-log" containerID="cri-o://f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c" gracePeriod=30 Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.846652 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"935d1166-b0ca-4ffe-836a-7ab7278ca919","Type":"ContainerStarted","Data":"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295"} Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.846685 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-httpd" containerID="cri-o://49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295" gracePeriod=30 Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.868150 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=3.145737558 podStartE2EDuration="4.868132392s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="2025-11-25 17:36:37.312888181 +0000 UTC m=+2972.153030276" lastFinishedPulling="2025-11-25 17:36:39.035283015 +0000 UTC m=+2973.875425110" observedRunningTime="2025-11-25 17:36:40.859022625 +0000 UTC m=+2975.699164720" watchObservedRunningTime="2025-11-25 17:36:40.868132392 +0000 UTC m=+2975.708274487" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.881350 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.88588934 podStartE2EDuration="4.88133374s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="2025-11-25 17:36:37.553319675 +0000 UTC m=+2972.393461770" lastFinishedPulling="2025-11-25 17:36:39.548764075 +0000 UTC m=+2974.388906170" observedRunningTime="2025-11-25 17:36:40.879925562 +0000 UTC m=+2975.720067657" watchObservedRunningTime="2025-11-25 17:36:40.88133374 +0000 UTC m=+2975.721475835" Nov 25 17:36:40 crc kubenswrapper[4812]: I1125 17:36:40.905225 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.905209278 podStartE2EDuration="4.905209278s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:36:40.899969666 +0000 UTC m=+2975.740111761" watchObservedRunningTime="2025-11-25 17:36:40.905209278 +0000 UTC m=+2975.745351373" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.218126 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.293370 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e556e3f-d3af-47df-9561-e93c12e281d1-operator-scripts\") pod \"2e556e3f-d3af-47df-9561-e93c12e281d1\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.294092 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsmxp\" (UniqueName: \"kubernetes.io/projected/2e556e3f-d3af-47df-9561-e93c12e281d1-kube-api-access-wsmxp\") pod \"2e556e3f-d3af-47df-9561-e93c12e281d1\" (UID: \"2e556e3f-d3af-47df-9561-e93c12e281d1\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.294749 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e556e3f-d3af-47df-9561-e93c12e281d1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e556e3f-d3af-47df-9561-e93c12e281d1" (UID: "2e556e3f-d3af-47df-9561-e93c12e281d1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.294978 4812 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e556e3f-d3af-47df-9561-e93c12e281d1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.302184 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e556e3f-d3af-47df-9561-e93c12e281d1-kube-api-access-wsmxp" (OuterVolumeSpecName: "kube-api-access-wsmxp") pod "2e556e3f-d3af-47df-9561-e93c12e281d1" (UID: "2e556e3f-d3af-47df-9561-e93c12e281d1"). InnerVolumeSpecName "kube-api-access-wsmxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.396944 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsmxp\" (UniqueName: \"kubernetes.io/projected/2e556e3f-d3af-47df-9561-e93c12e281d1-kube-api-access-wsmxp\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.449750 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.580137 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.600805 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703680 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-combined-ca-bundle\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703742 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703780 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-logs\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703850 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-public-tls-certs\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703884 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-scripts\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703901 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-httpd-run\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.703961 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-config-data\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.704030 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54zw4\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-kube-api-access-54zw4\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.704057 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-ceph\") pod \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\" (UID: \"ed9db7cc-9bf5-4463-9af7-976ea1bc9370\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.705430 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-logs" (OuterVolumeSpecName: "logs") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.705450 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.709223 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-scripts" (OuterVolumeSpecName: "scripts") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.709621 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.712026 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-kube-api-access-54zw4" (OuterVolumeSpecName: "kube-api-access-54zw4") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "kube-api-access-54zw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.714526 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-ceph" (OuterVolumeSpecName: "ceph") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.735290 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.756283 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.759876 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-config-data" (OuterVolumeSpecName: "config-data") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.767614 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ed9db7cc-9bf5-4463-9af7-976ea1bc9370" (UID: "ed9db7cc-9bf5-4463-9af7-976ea1bc9370"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805230 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-ceph\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805292 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-scripts\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805411 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sdgg\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-kube-api-access-5sdgg\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805461 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-httpd-run\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805478 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805503 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-config-data\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805528 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-logs\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.805593 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-combined-ca-bundle\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.806102 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: 
"935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.806156 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-logs" (OuterVolumeSpecName: "logs") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.806209 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-internal-tls-certs\") pod \"935d1166-b0ca-4ffe-836a-7ab7278ca919\" (UID: \"935d1166-b0ca-4ffe-836a-7ab7278ca919\") " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807027 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807044 4812 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807052 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807061 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807072 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54zw4\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-kube-api-access-54zw4\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807081 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807089 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807112 4812 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807121 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807141 4812 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed9db7cc-9bf5-4463-9af7-976ea1bc9370-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.807150 
4812 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/935d1166-b0ca-4ffe-836a-7ab7278ca919-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.808744 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.810452 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-kube-api-access-5sdgg" (OuterVolumeSpecName: "kube-api-access-5sdgg") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "kube-api-access-5sdgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.811224 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-scripts" (OuterVolumeSpecName: "scripts") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.811480 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-ceph" (OuterVolumeSpecName: "ceph") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.842684 4812 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.858701 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-config-data" (OuterVolumeSpecName: "config-data") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.872099 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880109 4812 generic.go:334] "Generic (PLEG): container finished" podID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerID="49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295" exitCode=0 Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880142 4812 generic.go:334] "Generic (PLEG): container finished" podID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerID="f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c" exitCode=143 Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880202 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"935d1166-b0ca-4ffe-836a-7ab7278ca919","Type":"ContainerDied","Data":"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880230 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"935d1166-b0ca-4ffe-836a-7ab7278ca919","Type":"ContainerDied","Data":"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880261 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"935d1166-b0ca-4ffe-836a-7ab7278ca919","Type":"ContainerDied","Data":"4b3017fb879e883de1bcf299e9e21efa060076528a4a185b5d17d9cd181b51e1"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880277 4812 scope.go:117] "RemoveContainer" containerID="49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.880391 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.885670 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "935d1166-b0ca-4ffe-836a-7ab7278ca919" (UID: "935d1166-b0ca-4ffe-836a-7ab7278ca919"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.886344 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-r4w2f" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.886351 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-r4w2f" event={"ID":"2e556e3f-d3af-47df-9561-e93c12e281d1","Type":"ContainerDied","Data":"5d0bfd4f4f428a225d3898907f8b950666fb1a01d7b1086373d89a072621152c"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.886387 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d0bfd4f4f428a225d3898907f8b950666fb1a01d7b1086373d89a072621152c" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.889775 4812 generic.go:334] "Generic (PLEG): container finished" podID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerID="4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e" exitCode=0 Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.889834 4812 generic.go:334] "Generic (PLEG): container finished" podID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerID="468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6" exitCode=143 Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.889884 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ed9db7cc-9bf5-4463-9af7-976ea1bc9370","Type":"ContainerDied","Data":"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.889915 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.889927 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ed9db7cc-9bf5-4463-9af7-976ea1bc9370","Type":"ContainerDied","Data":"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.889942 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ed9db7cc-9bf5-4463-9af7-976ea1bc9370","Type":"ContainerDied","Data":"1e4e2b7045801a21ebd1007b2dd326ddb71e3e0aa32459e727eecb827a660ce7"} Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.918795 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925149 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925205 4812 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925217 4812 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-ceph\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925227 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925253 4812 reconciler_common.go:293] "Volume detached 
for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925263 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sdgg\" (UniqueName: \"kubernetes.io/projected/935d1166-b0ca-4ffe-836a-7ab7278ca919-kube-api-access-5sdgg\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925309 4812 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.925337 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935d1166-b0ca-4ffe-836a-7ab7278ca919-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.929741 4812 scope.go:117] "RemoveContainer" containerID="f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.936193 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.950409 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.951027 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d15ab92-82ab-47f1-b377-eec15c9c7b99" containerName="mariadb-account-create" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951056 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d15ab92-82ab-47f1-b377-eec15c9c7b99" containerName="mariadb-account-create" Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.951098 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-log" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951112 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-log" Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.951138 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-log" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951152 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-log" Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.951188 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e556e3f-d3af-47df-9561-e93c12e281d1" containerName="mariadb-database-create" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951201 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e556e3f-d3af-47df-9561-e93c12e281d1" containerName="mariadb-database-create" Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.951222 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-httpd" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951234 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-httpd" Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.951271 4812 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-httpd" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951284 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-httpd" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951555 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-log" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951574 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-httpd" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951584 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" containerName="glance-log" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951595 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e556e3f-d3af-47df-9561-e93c12e281d1" containerName="mariadb-database-create" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951606 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d15ab92-82ab-47f1-b377-eec15c9c7b99" containerName="mariadb-account-create" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.951624 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" containerName="glance-httpd" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.952554 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.954914 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.955135 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.955284 4812 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.971461 4812 scope.go:117] "RemoveContainer" containerID="49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.971955 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.972137 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295\": container with ID starting with 49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295 not found: ID does not exist" containerID="49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.972194 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295"} err="failed to get container status \"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295\": rpc error: code = NotFound desc = could not find container \"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295\": container with ID starting 
with 49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295 not found: ID does not exist" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.972220 4812 scope.go:117] "RemoveContainer" containerID="f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c" Nov 25 17:36:41 crc kubenswrapper[4812]: E1125 17:36:41.972595 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c\": container with ID starting with f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c not found: ID does not exist" containerID="f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.972629 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c"} err="failed to get container status \"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c\": rpc error: code = NotFound desc = could not find container \"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c\": container with ID starting with f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c not found: ID does not exist" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.972654 4812 scope.go:117] "RemoveContainer" containerID="49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.973013 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295"} err="failed to get container status \"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295\": rpc error: code = NotFound desc = could not find container \"49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295\": container with ID starting with 49091efdf4e51af26e33654df03f8656346400f81bcce0a1e0ada89478b66295 not found: ID does not exist" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.973046 4812 scope.go:117] "RemoveContainer" containerID="f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.973352 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c"} err="failed to get container status \"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c\": rpc error: code = NotFound desc = could not find container \"f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c\": container with ID starting with f4c931af6c74c0588ade50243b8d2c6c4ff6a2dad7d50e8fb3cffb55178d0b7c not found: ID does not exist" Nov 25 17:36:41 crc kubenswrapper[4812]: I1125 17:36:41.973372 4812 scope.go:117] "RemoveContainer" containerID="4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.025761 4812 scope.go:117] "RemoveContainer" containerID="468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027148 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-ceph\") pod \"glance-default-external-api-0\" (UID: 
\"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027209 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027398 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027641 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-config-data\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027675 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfjk7\" (UniqueName: \"kubernetes.io/projected/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-kube-api-access-tfjk7\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027700 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027825 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-logs\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027854 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-scripts\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.027875 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0" Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.029829 4812 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" 
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.067184 4812 scope.go:117] "RemoveContainer" containerID="4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"
Nov 25 17:36:42 crc kubenswrapper[4812]: E1125 17:36:42.067844 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e\": container with ID starting with 4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e not found: ID does not exist" containerID="4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.067886 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"} err="failed to get container status \"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e\": rpc error: code = NotFound desc = could not find container \"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e\": container with ID starting with 4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e not found: ID does not exist"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.067906 4812 scope.go:117] "RemoveContainer" containerID="468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"
Nov 25 17:36:42 crc kubenswrapper[4812]: E1125 17:36:42.068254 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6\": container with ID starting with 468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6 not found: ID does not exist" containerID="468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.068275 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"} err="failed to get container status \"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6\": rpc error: code = NotFound desc = could not find container \"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6\": container with ID starting with 468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6 not found: ID does not exist"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.068287 4812 scope.go:117] "RemoveContainer" containerID="4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.068497 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e"} err="failed to get container status \"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e\": rpc error: code = NotFound desc = could not find container \"4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e\": container with ID starting with 4f7920261c5bec07509e5acb81ba6aff2b7816a729bf1bbd43794d050c68121e not found: ID does not exist"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.068520 4812 scope.go:117] "RemoveContainer" containerID="468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.068739 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6"} err="failed to get container status \"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6\": rpc error: code = NotFound desc = could not find container \"468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6\": container with ID starting with 468001da887d89e412c2c59a06f1ecc741f90a3ace19ad273c83e261a21c5fe6 not found: ID does not exist"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133439 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133598 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-config-data\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133629 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfjk7\" (UniqueName: \"kubernetes.io/projected/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-kube-api-access-tfjk7\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133655 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133716 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-logs\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133741 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-scripts\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133759 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133831 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-ceph\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.133861 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.134424 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.135958 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-logs\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.137199 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.146884 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.149315 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.152515 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-config-data\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.169706 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-scripts\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.176173 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-ceph\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.181287 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfjk7\" (UniqueName: \"kubernetes.io/projected/3d792d35-8186-4ae6-b8f2-b3a1a9ff186a-kube-api-access-tfjk7\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.220434 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a\") " pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.230623 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.240507 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.269325 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.279952 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.281865 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.287794 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.293325 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.326136 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438503 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438680 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438749 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-ceph\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438784 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-logs\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438825 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438855 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438880 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438914 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xddxv\" (UniqueName: \"kubernetes.io/projected/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-kube-api-access-xddxv\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.438949 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.543475 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.543623 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-ceph\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.543665 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-logs\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.543707 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.544012 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.544032 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.544070 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xddxv\" (UniqueName: \"kubernetes.io/projected/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-kube-api-access-xddxv\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.544098 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.544125 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.545853 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.546362 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-logs\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.546621 4812 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.557625 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.571764 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-ceph\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.571919 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xddxv\" (UniqueName: \"kubernetes.io/projected/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-kube-api-access-xddxv\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.574230 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.575114 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.589615 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6bebab7-31ed-4d96-8ef0-1f9bee276d42-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.606852 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f6bebab7-31ed-4d96-8ef0-1f9bee276d42\") " pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.895625 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:42 crc kubenswrapper[4812]: I1125 17:36:42.897272 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 17:36:43 crc kubenswrapper[4812]: I1125 17:36:43.841278 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="935d1166-b0ca-4ffe-836a-7ab7278ca919" path="/var/lib/kubelet/pods/935d1166-b0ca-4ffe-836a-7ab7278ca919/volumes"
Nov 25 17:36:43 crc kubenswrapper[4812]: I1125 17:36:43.842499 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed9db7cc-9bf5-4463-9af7-976ea1bc9370" path="/var/lib/kubelet/pods/ed9db7cc-9bf5-4463-9af7-976ea1bc9370/volumes"
Nov 25 17:36:46 crc kubenswrapper[4812]: I1125 17:36:46.642301 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0"
Nov 25 17:36:46 crc kubenswrapper[4812]: I1125 17:36:46.862404 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.092857 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-9ctdm"]
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.094395 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.104268 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.104369 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-kfbck"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.141489 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-9ctdm"]
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.260979 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-combined-ca-bundle\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.261097 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-config-data\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.261155 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-job-config-data\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.261190 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxxpl\" (UniqueName: \"kubernetes.io/projected/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-kube-api-access-mxxpl\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.363070 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-combined-ca-bundle\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.363182 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-config-data\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.363236 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-job-config-data\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.363267 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxxpl\" (UniqueName: \"kubernetes.io/projected/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-kube-api-access-mxxpl\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.370711 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-job-config-data\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.370938 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-combined-ca-bundle\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.372272 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-config-data\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.389466 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxxpl\" (UniqueName: \"kubernetes.io/projected/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-kube-api-access-mxxpl\") pod \"manila-db-sync-9ctdm\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:47 crc kubenswrapper[4812]: I1125 17:36:47.439370 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-9ctdm"
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.640359 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 17:36:49 crc kubenswrapper[4812]: W1125 17:36:49.663244 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6bebab7_31ed_4d96_8ef0_1f9bee276d42.slice/crio-eaa8aab6e758a63640529b179e97f2875f4535ae921c64a3e99966f541918b56 WatchSource:0}: Error finding container eaa8aab6e758a63640529b179e97f2875f4535ae921c64a3e99966f541918b56: Status 404 returned error can't find the container with id eaa8aab6e758a63640529b179e97f2875f4535ae921c64a3e99966f541918b56
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.980488 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f6bebab7-31ed-4d96-8ef0-1f9bee276d42","Type":"ContainerStarted","Data":"eaa8aab6e758a63640529b179e97f2875f4535ae921c64a3e99966f541918b56"}
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.984121 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67cb765896-fxd9b" event={"ID":"b30abba9-0f34-44e1-97c5-6babb1192606","Type":"ContainerStarted","Data":"5fec46a65f2b065989bdad47586e528ea88994dfe3ee92bed86cecb73b16fa61"}
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.984173 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67cb765896-fxd9b" event={"ID":"b30abba9-0f34-44e1-97c5-6babb1192606","Type":"ContainerStarted","Data":"53012c0b50de2b174e6e65cc967e612ff7f4e3d4a4e67fa6c6dc0a5f6bfe86f5"}
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.988621 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a","Type":"ContainerStarted","Data":"d34b547a3e424eb528a8f309c6a15be7043634e4a22717c495dd6b75624fea8d"}
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.988658 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a","Type":"ContainerStarted","Data":"d049e312b15a1e7067feb0dfb6dc7fdd8223da835c6c6cc6b9a954f99b738e08"}
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.990381 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56b9b89f86-mb9q5" event={"ID":"842381e2-aa1c-4a72-9db3-51bffd277741","Type":"ContainerStarted","Data":"79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5"}
Nov 25 17:36:49 crc kubenswrapper[4812]: I1125 17:36:49.990406 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56b9b89f86-mb9q5" event={"ID":"842381e2-aa1c-4a72-9db3-51bffd277741","Type":"ContainerStarted","Data":"dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60"}
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.996946 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76d45dc6b9-tlnnp" event={"ID":"cd278622-9317-44c5-a14c-07df7a2bfab0","Type":"ContainerStarted","Data":"1c235315de55c1a6eedf883814e813c56c8ce63edc1021d0a9607c1ce3a3c716"}
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.996987 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76d45dc6b9-tlnnp" event={"ID":"cd278622-9317-44c5-a14c-07df7a2bfab0","Type":"ContainerStarted","Data":"0f5566b8b4b6ee0328e795a4c6faaeee9d0b5c880537d4c8f4d619410289397f"}
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.997103 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76d45dc6b9-tlnnp" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon-log" containerID="cri-o://0f5566b8b4b6ee0328e795a4c6faaeee9d0b5c880537d4c8f4d619410289397f" gracePeriod=30
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.997363 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76d45dc6b9-tlnnp" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon" containerID="cri-o://1c235315de55c1a6eedf883814e813c56c8ce63edc1021d0a9607c1ce3a3c716" gracePeriod=30
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.999867 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6748cb846f-77xjv" event={"ID":"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e","Type":"ContainerStarted","Data":"f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba"}
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.999889 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6748cb846f-77xjv" event={"ID":"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e","Type":"ContainerStarted","Data":"6d749196c24ee77a83bbb4aba191b2370b2374e5a673a4f5fd90ae1de8250701"}
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:49.999997 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6748cb846f-77xjv" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon-log" containerID="cri-o://6d749196c24ee77a83bbb4aba191b2370b2374e5a673a4f5fd90ae1de8250701" gracePeriod=30
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:50.000064 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6748cb846f-77xjv" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon" containerID="cri-o://f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba" gracePeriod=30
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:50.044358 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6748cb846f-77xjv" podStartSLOduration=2.931555475 podStartE2EDuration="14.044335915s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="2025-11-25 17:36:37.992817056 +0000 UTC m=+2972.832959151" lastFinishedPulling="2025-11-25 17:36:49.105597496 +0000 UTC m=+2983.945739591" observedRunningTime="2025-11-25 17:36:50.037803347 +0000 UTC m=+2984.877945452" watchObservedRunningTime="2025-11-25 17:36:50.044335915 +0000 UTC m=+2984.884478010"
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:50.044762 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-67cb765896-fxd9b" podStartSLOduration=3.055177675 podStartE2EDuration="12.044755416s" podCreationTimestamp="2025-11-25 17:36:38 +0000 UTC" firstStartedPulling="2025-11-25 17:36:40.219942705 +0000 UTC m=+2975.060084800" lastFinishedPulling="2025-11-25 17:36:49.209520446 +0000 UTC m=+2984.049662541" observedRunningTime="2025-11-25 17:36:50.014234709 +0000 UTC m=+2984.854376844" watchObservedRunningTime="2025-11-25 17:36:50.044755416 +0000 UTC m=+2984.884897511"
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:50.084873 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-56b9b89f86-mb9q5" podStartSLOduration=2.7304311759999997 podStartE2EDuration="12.084837244s" podCreationTimestamp="2025-11-25 17:36:38 +0000 UTC" firstStartedPulling="2025-11-25 17:36:39.878613685 +0000 UTC m=+2974.718755780" lastFinishedPulling="2025-11-25 17:36:49.233019753 +0000 UTC m=+2984.073161848" observedRunningTime="2025-11-25 17:36:50.083319393 +0000 UTC m=+2984.923461488" watchObservedRunningTime="2025-11-25 17:36:50.084837244 +0000 UTC m=+2984.924979329"
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:50.104685 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-76d45dc6b9-tlnnp" podStartSLOduration=3.014652403 podStartE2EDuration="14.104661792s" podCreationTimestamp="2025-11-25 17:36:36 +0000 UTC" firstStartedPulling="2025-11-25 17:36:38.035882957 +0000 UTC m=+2972.876025052" lastFinishedPulling="2025-11-25 17:36:49.125892326 +0000 UTC m=+2983.966034441" observedRunningTime="2025-11-25 17:36:50.06587073 +0000 UTC m=+2984.906012835" watchObservedRunningTime="2025-11-25 17:36:50.104661792 +0000 UTC m=+2984.944803887"
Nov 25 17:36:50 crc kubenswrapper[4812]: I1125 17:36:50.584391 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-9ctdm"]
Nov 25 17:36:51 crc kubenswrapper[4812]: I1125 17:36:51.012398 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f6bebab7-31ed-4d96-8ef0-1f9bee276d42","Type":"ContainerStarted","Data":"0c2c9970937de16e5fd75a2b814fae6017b9ae789492858acb6bfce08636d1a3"}
Nov 25 17:36:51 crc kubenswrapper[4812]: I1125 17:36:51.012777 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f6bebab7-31ed-4d96-8ef0-1f9bee276d42","Type":"ContainerStarted","Data":"a88ce932df3131717c6772e57c52126c9f38019d6ece17bad7ffcb6315a388cf"}
Nov 25 17:36:51 crc kubenswrapper[4812]: I1125 17:36:51.016198 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d792d35-8186-4ae6-b8f2-b3a1a9ff186a","Type":"ContainerStarted","Data":"a4d1f923a39166ee83b74a973823942cfc35097f0f8aaf311aecc877db17edb0"}
Nov 25 17:36:51 crc kubenswrapper[4812]: I1125 17:36:51.017777 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-9ctdm" event={"ID":"e8a4c444-a96f-40ee-84ec-4bbd167d11c2","Type":"ContainerStarted","Data":"acc429a6189a05cd578c8ac43fafb01b270b4e74c7312c54e624221f352fed40"}
Nov 25 17:36:51 crc kubenswrapper[4812]: I1125 17:36:51.048544 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.04850581 podStartE2EDuration="9.04850581s" podCreationTimestamp="2025-11-25 17:36:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:36:51.037967334 +0000 UTC m=+2985.878109419" watchObservedRunningTime="2025-11-25 17:36:51.04850581 +0000 UTC m=+2985.888647905"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.327769 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.328045 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.365477 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.375409 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.387252 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=11.387230381 podStartE2EDuration="11.387230381s" podCreationTimestamp="2025-11-25 17:36:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:36:51.076308214 +0000 UTC m=+2985.916450299" watchObservedRunningTime="2025-11-25 17:36:52.387230381 +0000 UTC m=+2987.227372486"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.896683 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.897030 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.963305 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:52 crc kubenswrapper[4812]: I1125 17:36:52.982851 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:53 crc kubenswrapper[4812]: I1125 17:36:53.051108 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:53 crc kubenswrapper[4812]: I1125 17:36:53.051342 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:53 crc kubenswrapper[4812]: I1125 17:36:53.051412 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 17:36:53 crc kubenswrapper[4812]: I1125 17:36:53.051471 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:56 crc kubenswrapper[4812]: I1125 17:36:56.252225 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.090406 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6748cb846f-77xjv"
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.238137 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-76d45dc6b9-tlnnp"
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.333290 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.333351 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.333449 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx"
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.334165 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:36:57 crc kubenswrapper[4812]: I1125 17:36:57.334221 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" gracePeriod=600
Nov 25 17:36:57 crc kubenswrapper[4812]: E1125 17:36:57.465197 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.113440 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" exitCode=0
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.113550 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd"}
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.113841 4812 scope.go:117] "RemoveContainer" containerID="4663c5c86743f0128022e03f1f9d0812eba97990871d15cc9bec70b7e56a04db"
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.114487 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd"
Nov 25 17:36:58 crc kubenswrapper[4812]: E1125 17:36:58.114783 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.117623 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-9ctdm" event={"ID":"e8a4c444-a96f-40ee-84ec-4bbd167d11c2","Type":"ContainerStarted","Data":"5cbaf4d0081c590f809a629c02693063b98c4a66896d33258c4b82bae09ddd98"}
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.167908 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-9ctdm" podStartSLOduration=4.407459439 podStartE2EDuration="11.16788621s" podCreationTimestamp="2025-11-25 17:36:47 +0000 UTC" firstStartedPulling="2025-11-25 17:36:50.587708298 +0000 UTC m=+2985.427850393" lastFinishedPulling="2025-11-25 17:36:57.348135069 +0000 UTC m=+2992.188277164" observedRunningTime="2025-11-25 17:36:58.160617643 +0000 UTC m=+2993.000759738" watchObservedRunningTime="2025-11-25 17:36:58.16788621 +0000 UTC m=+2993.008028305"
Nov 25 17:36:58 crc kubenswrapper[4812]: I1125 17:36:58.211573 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 17:36:59 crc kubenswrapper[4812]: I1125 17:36:59.300613 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-56b9b89f86-mb9q5"
Nov 25 17:36:59 crc kubenswrapper[4812]: I1125 17:36:59.300964 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-56b9b89f86-mb9q5"
Nov 25 17:36:59 crc kubenswrapper[4812]: I1125 17:36:59.303208 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.248:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.248:8443: connect: connection refused"
Nov 25 17:36:59 crc kubenswrapper[4812]: I1125 17:36:59.551463 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67cb765896-fxd9b"
Nov 25 17:36:59 crc kubenswrapper[4812]: I1125 17:36:59.551610 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67cb765896-fxd9b"
Nov 25 17:36:59 crc kubenswrapper[4812]: I1125 17:36:59.553228 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67cb765896-fxd9b" podUID="b30abba9-0f34-44e1-97c5-6babb1192606" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.249:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.249:8443: connect: connection refused"
Nov 25 17:37:09 crc kubenswrapper[4812]: I1125 17:37:09.300511 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.248:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.248:8443: connect: connection refused"
Nov 25 17:37:09 crc kubenswrapper[4812]: I1125 17:37:09.551525 4812 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67cb765896-fxd9b" podUID="b30abba9-0f34-44e1-97c5-6babb1192606" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.249:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.249:8443: connect: connection refused"
Nov 25 17:37:12 crc kubenswrapper[4812]: I1125 17:37:12.832106 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd"
Nov 25 17:37:12 crc kubenswrapper[4812]: E1125 17:37:12.832876 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:37:14 crc
kubenswrapper[4812]: I1125 17:37:14.865680 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 17:37:14 crc kubenswrapper[4812]: I1125 17:37:14.892613 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.342422 4812 generic.go:334] "Generic (PLEG): container finished" podID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerID="1c235315de55c1a6eedf883814e813c56c8ce63edc1021d0a9607c1ce3a3c716" exitCode=137 Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.342857 4812 generic.go:334] "Generic (PLEG): container finished" podID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerID="0f5566b8b4b6ee0328e795a4c6faaeee9d0b5c880537d4c8f4d619410289397f" exitCode=137 Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.342927 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76d45dc6b9-tlnnp" event={"ID":"cd278622-9317-44c5-a14c-07df7a2bfab0","Type":"ContainerDied","Data":"1c235315de55c1a6eedf883814e813c56c8ce63edc1021d0a9607c1ce3a3c716"} Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.343000 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76d45dc6b9-tlnnp" event={"ID":"cd278622-9317-44c5-a14c-07df7a2bfab0","Type":"ContainerDied","Data":"0f5566b8b4b6ee0328e795a4c6faaeee9d0b5c880537d4c8f4d619410289397f"} Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.344745 4812 generic.go:334] "Generic (PLEG): container finished" podID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerID="f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba" exitCode=137 Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.344765 4812 generic.go:334] "Generic (PLEG): container finished" podID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerID="6d749196c24ee77a83bbb4aba191b2370b2374e5a673a4f5fd90ae1de8250701" exitCode=137 Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.344781 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6748cb846f-77xjv" event={"ID":"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e","Type":"ContainerDied","Data":"f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba"} Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.344795 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6748cb846f-77xjv" event={"ID":"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e","Type":"ContainerDied","Data":"6d749196c24ee77a83bbb4aba191b2370b2374e5a673a4f5fd90ae1de8250701"} Nov 25 17:37:20 crc kubenswrapper[4812]: E1125 17:37:20.386172 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2bbc08d_477f_4dfb_82ba_1d9b1fd90f9e.slice/crio-conmon-f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2bbc08d_477f_4dfb_82ba_1d9b1fd90f9e.slice/crio-f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.618229 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.623358 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797553 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-horizon-secret-key\") pod \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797722 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbmk4\" (UniqueName: \"kubernetes.io/projected/cd278622-9317-44c5-a14c-07df7a2bfab0-kube-api-access-bbmk4\") pod \"cd278622-9317-44c5-a14c-07df7a2bfab0\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797776 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-scripts\") pod \"cd278622-9317-44c5-a14c-07df7a2bfab0\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797801 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cd278622-9317-44c5-a14c-07df7a2bfab0-horizon-secret-key\") pod \"cd278622-9317-44c5-a14c-07df7a2bfab0\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797848 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd278622-9317-44c5-a14c-07df7a2bfab0-logs\") pod \"cd278622-9317-44c5-a14c-07df7a2bfab0\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797874 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-logs\") pod \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797938 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-scripts\") pod \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.797985 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-config-data\") pod \"cd278622-9317-44c5-a14c-07df7a2bfab0\" (UID: \"cd278622-9317-44c5-a14c-07df7a2bfab0\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.798016 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6zzl\" (UniqueName: \"kubernetes.io/projected/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-kube-api-access-j6zzl\") pod \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.798072 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-config-data\") pod \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\" (UID: \"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e\") " Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.799647 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd278622-9317-44c5-a14c-07df7a2bfab0-logs" (OuterVolumeSpecName: "logs") pod "cd278622-9317-44c5-a14c-07df7a2bfab0" (UID: "cd278622-9317-44c5-a14c-07df7a2bfab0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.799943 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-logs" (OuterVolumeSpecName: "logs") pod "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" (UID: "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.805725 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd278622-9317-44c5-a14c-07df7a2bfab0-kube-api-access-bbmk4" (OuterVolumeSpecName: "kube-api-access-bbmk4") pod "cd278622-9317-44c5-a14c-07df7a2bfab0" (UID: "cd278622-9317-44c5-a14c-07df7a2bfab0"). InnerVolumeSpecName "kube-api-access-bbmk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.805752 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd278622-9317-44c5-a14c-07df7a2bfab0-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "cd278622-9317-44c5-a14c-07df7a2bfab0" (UID: "cd278622-9317-44c5-a14c-07df7a2bfab0"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.805810 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-kube-api-access-j6zzl" (OuterVolumeSpecName: "kube-api-access-j6zzl") pod "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" (UID: "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e"). InnerVolumeSpecName "kube-api-access-j6zzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.808470 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" (UID: "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.827383 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-scripts" (OuterVolumeSpecName: "scripts") pod "cd278622-9317-44c5-a14c-07df7a2bfab0" (UID: "cd278622-9317-44c5-a14c-07df7a2bfab0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.827781 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-config-data" (OuterVolumeSpecName: "config-data") pod "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" (UID: "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.828595 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-scripts" (OuterVolumeSpecName: "scripts") pod "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" (UID: "c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.828732 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-config-data" (OuterVolumeSpecName: "config-data") pod "cd278622-9317-44c5-a14c-07df7a2bfab0" (UID: "cd278622-9317-44c5-a14c-07df7a2bfab0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904568 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbmk4\" (UniqueName: \"kubernetes.io/projected/cd278622-9317-44c5-a14c-07df7a2bfab0-kube-api-access-bbmk4\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904688 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904722 4812 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cd278622-9317-44c5-a14c-07df7a2bfab0-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904747 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd278622-9317-44c5-a14c-07df7a2bfab0-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904764 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904783 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904800 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd278622-9317-44c5-a14c-07df7a2bfab0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904818 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6zzl\" (UniqueName: \"kubernetes.io/projected/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-kube-api-access-j6zzl\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904838 4812 reconciler_common.go:293] "Volume 
detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:20 crc kubenswrapper[4812]: I1125 17:37:20.904855 4812 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.282355 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.356851 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76d45dc6b9-tlnnp" event={"ID":"cd278622-9317-44c5-a14c-07df7a2bfab0","Type":"ContainerDied","Data":"14616e716f6d4939c85a64020782057e72943069ed3e793b7993bf1b57beddf0"} Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.356877 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76d45dc6b9-tlnnp" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.356930 4812 scope.go:117] "RemoveContainer" containerID="1c235315de55c1a6eedf883814e813c56c8ce63edc1021d0a9607c1ce3a3c716" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.360640 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6748cb846f-77xjv" event={"ID":"c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e","Type":"ContainerDied","Data":"d6cdb4bdbff5e3bd8c18f8120383b078dbf46dec9f6183d4bf48477ddcd1ad90"} Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.360764 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6748cb846f-77xjv" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.414392 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-76d45dc6b9-tlnnp"] Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.422588 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-76d45dc6b9-tlnnp"] Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.433668 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6748cb846f-77xjv"] Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.445595 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6748cb846f-77xjv"] Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.520043 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.567844 4812 scope.go:117] "RemoveContainer" containerID="0f5566b8b4b6ee0328e795a4c6faaeee9d0b5c880537d4c8f4d619410289397f" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.587441 4812 scope.go:117] "RemoveContainer" containerID="f898afaa62d10357c5786559a0cdeac3560d391f32ee2422cc78635aef758dba" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.760588 4812 scope.go:117] "RemoveContainer" containerID="6d749196c24ee77a83bbb4aba191b2370b2374e5a673a4f5fd90ae1de8250701" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.849982 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" path="/var/lib/kubelet/pods/c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e/volumes" Nov 25 17:37:21 crc kubenswrapper[4812]: I1125 17:37:21.851275 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" path="/var/lib/kubelet/pods/cd278622-9317-44c5-a14c-07df7a2bfab0/volumes" Nov 25 17:37:22 crc kubenswrapper[4812]: I1125 17:37:22.370049 4812 generic.go:334] "Generic (PLEG): container finished" podID="e8a4c444-a96f-40ee-84ec-4bbd167d11c2" containerID="5cbaf4d0081c590f809a629c02693063b98c4a66896d33258c4b82bae09ddd98" exitCode=0 Nov 25 17:37:22 crc kubenswrapper[4812]: I1125 17:37:22.370282 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-9ctdm" event={"ID":"e8a4c444-a96f-40ee-84ec-4bbd167d11c2","Type":"ContainerDied","Data":"5cbaf4d0081c590f809a629c02693063b98c4a66896d33258c4b82bae09ddd98"} Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.050798 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.215230 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-67cb765896-fxd9b" Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.277355 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-56b9b89f86-mb9q5"] Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.383273 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon-log" containerID="cri-o://dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60" gracePeriod=30 Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.383544 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" containerID="cri-o://79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5" gracePeriod=30 Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.802335 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-9ctdm" Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.964608 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-job-config-data\") pod \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.964667 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxxpl\" (UniqueName: \"kubernetes.io/projected/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-kube-api-access-mxxpl\") pod \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.964843 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-combined-ca-bundle\") pod \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.964909 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-config-data\") pod \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\" (UID: \"e8a4c444-a96f-40ee-84ec-4bbd167d11c2\") " Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.974849 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-kube-api-access-mxxpl" (OuterVolumeSpecName: "kube-api-access-mxxpl") pod "e8a4c444-a96f-40ee-84ec-4bbd167d11c2" (UID: "e8a4c444-a96f-40ee-84ec-4bbd167d11c2"). InnerVolumeSpecName "kube-api-access-mxxpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.975707 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "e8a4c444-a96f-40ee-84ec-4bbd167d11c2" (UID: "e8a4c444-a96f-40ee-84ec-4bbd167d11c2"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:23 crc kubenswrapper[4812]: I1125 17:37:23.979863 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-config-data" (OuterVolumeSpecName: "config-data") pod "e8a4c444-a96f-40ee-84ec-4bbd167d11c2" (UID: "e8a4c444-a96f-40ee-84ec-4bbd167d11c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.011598 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8a4c444-a96f-40ee-84ec-4bbd167d11c2" (UID: "e8a4c444-a96f-40ee-84ec-4bbd167d11c2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.067587 4812 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.067624 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxxpl\" (UniqueName: \"kubernetes.io/projected/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-kube-api-access-mxxpl\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.067638 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.067649 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4c444-a96f-40ee-84ec-4bbd167d11c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.399842 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-9ctdm" event={"ID":"e8a4c444-a96f-40ee-84ec-4bbd167d11c2","Type":"ContainerDied","Data":"acc429a6189a05cd578c8ac43fafb01b270b4e74c7312c54e624221f352fed40"} Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.399891 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="acc429a6189a05cd578c8ac43fafb01b270b4e74c7312c54e624221f352fed40" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.401272 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-9ctdm" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.788188 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:24 crc kubenswrapper[4812]: E1125 17:37:24.788880 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8a4c444-a96f-40ee-84ec-4bbd167d11c2" containerName="manila-db-sync" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.788897 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8a4c444-a96f-40ee-84ec-4bbd167d11c2" containerName="manila-db-sync" Nov 25 17:37:24 crc kubenswrapper[4812]: E1125 17:37:24.788911 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.788918 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon" Nov 25 17:37:24 crc kubenswrapper[4812]: E1125 17:37:24.788932 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon-log" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.788938 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon-log" Nov 25 17:37:24 crc kubenswrapper[4812]: E1125 17:37:24.788953 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.788958 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon" Nov 25 17:37:24 crc 
kubenswrapper[4812]: E1125 17:37:24.788971 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon-log" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.788977 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon-log" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.789164 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.789173 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.789184 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd278622-9317-44c5-a14c-07df7a2bfab0" containerName="horizon-log" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.789201 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8a4c444-a96f-40ee-84ec-4bbd167d11c2" containerName="manila-db-sync" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.789210 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2bbc08d-477f-4dfb-82ba-1d9b1fd90f9e" containerName="horizon-log" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.790356 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.797102 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.797166 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.797203 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.797241 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-scripts\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.797259 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.797317 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shjlb\" (UniqueName: \"kubernetes.io/projected/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-kube-api-access-shjlb\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.799173 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-kfbck" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.799411 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.799614 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.799763 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.830134 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.831630 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.838677 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.846911 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.872813 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.886998 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-t8l6h"] Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.888736 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.899397 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.899661 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.899761 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-scripts\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.899844 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.899975 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shjlb\" (UniqueName: \"kubernetes.io/projected/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-kube-api-access-shjlb\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.900059 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.900921 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.908465 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.912306 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.921853 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.922292 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-scripts\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.925753 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shjlb\" (UniqueName: \"kubernetes.io/projected/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-kube-api-access-shjlb\") pod \"manila-scheduler-0\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " pod="openstack/manila-scheduler-0" Nov 25 17:37:24 crc kubenswrapper[4812]: I1125 17:37:24.925816 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-t8l6h"] Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002309 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002386 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002418 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-config\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002447 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002468 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002485 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-ceph\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002510 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002544 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002571 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002598 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-config-data\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002626 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5xlq\" (UniqueName: \"kubernetes.io/projected/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-kube-api-access-q5xlq\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002657 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002688 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x58r\" (UniqueName: \"kubernetes.io/projected/db712262-75ce-42f5-ab11-c34ca04cf37a-kube-api-access-9x58r\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.002709 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-scripts\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.029417 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.035269 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.037746 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.041190 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104179 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-scripts\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104243 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104291 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104311 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-config\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104341 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104363 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104380 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-ceph\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104405 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104426 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104455 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104481 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-config-data\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104513 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5xlq\" (UniqueName: \"kubernetes.io/projected/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-kube-api-access-q5xlq\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104596 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.104634 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x58r\" (UniqueName: \"kubernetes.io/projected/db712262-75ce-42f5-ab11-c34ca04cf37a-kube-api-access-9x58r\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.105154 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.105218 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.105780 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.105829 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: 
\"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.105895 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.106364 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-config\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.108072 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.109193 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-scripts\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.110153 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-config-data\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.110258 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/db712262-75ce-42f5-ab11-c34ca04cf37a-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.112191 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-ceph\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.112438 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.121032 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x58r\" (UniqueName: \"kubernetes.io/projected/db712262-75ce-42f5-ab11-c34ca04cf37a-kube-api-access-9x58r\") pod \"dnsmasq-dns-76b5fdb995-t8l6h\" (UID: \"db712262-75ce-42f5-ab11-c34ca04cf37a\") " pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.122201 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.123195 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5xlq\" (UniqueName: \"kubernetes.io/projected/8af2d4e3-790f-4ab4-92e8-1c0a083b9531-kube-api-access-q5xlq\") pod \"manila-share-share1-0\" (UID: \"8af2d4e3-790f-4ab4-92e8-1c0a083b9531\") " pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.163262 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.208608 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data-custom\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.208827 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfkqd\" (UniqueName: \"kubernetes.io/projected/3748e190-c778-4c84-bd41-4b7dac8e569a-kube-api-access-wfkqd\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.208913 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-scripts\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.209021 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.209188 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3748e190-c778-4c84-bd41-4b7dac8e569a-logs\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.209270 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.209384 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3748e190-c778-4c84-bd41-4b7dac8e569a-etc-machine-id\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.211036 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.312659 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-scripts\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.312970 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.313035 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3748e190-c778-4c84-bd41-4b7dac8e569a-logs\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.313052 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.313086 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3748e190-c778-4c84-bd41-4b7dac8e569a-etc-machine-id\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.313119 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data-custom\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.313155 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfkqd\" (UniqueName: \"kubernetes.io/projected/3748e190-c778-4c84-bd41-4b7dac8e569a-kube-api-access-wfkqd\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.316070 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3748e190-c778-4c84-bd41-4b7dac8e569a-etc-machine-id\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.316591 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3748e190-c778-4c84-bd41-4b7dac8e569a-logs\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.317018 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " 
pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.320169 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-scripts\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.323094 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data-custom\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.326454 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.329500 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfkqd\" (UniqueName: \"kubernetes.io/projected/3748e190-c778-4c84-bd41-4b7dac8e569a-kube-api-access-wfkqd\") pod \"manila-api-0\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.357547 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.666967 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.678406 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:25 crc kubenswrapper[4812]: I1125 17:37:25.891796 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-t8l6h"] Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.097987 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.423495 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881","Type":"ContainerStarted","Data":"b30e254363dd7157fa014893dc73ad667891de79a8efc32b5e955604fa256c12"} Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.425688 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"3748e190-c778-4c84-bd41-4b7dac8e569a","Type":"ContainerStarted","Data":"e6b41810822b9e15b24d7cef42678c69aad0958bbe2e215ba239162a5111c3ae"} Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.427155 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"ea8d64a3e6f4738ebd4a7a253bf5bbd6e2e2da1d1f1e664171a7c322cfc45e66"} Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.428904 4812 generic.go:334] "Generic (PLEG): container finished" podID="db712262-75ce-42f5-ab11-c34ca04cf37a" containerID="7570fb986fee05b5231eb05fed1cd2eec938213172a94916da1880bc2adef083" exitCode=0 Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.428959 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" 
event={"ID":"db712262-75ce-42f5-ab11-c34ca04cf37a","Type":"ContainerDied","Data":"7570fb986fee05b5231eb05fed1cd2eec938213172a94916da1880bc2adef083"} Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.428991 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" event={"ID":"db712262-75ce-42f5-ab11-c34ca04cf37a","Type":"ContainerStarted","Data":"530713b171b52894615189ad4b6b1f18789d7684fdea8eb9104592d641ac3712"} Nov 25 17:37:26 crc kubenswrapper[4812]: I1125 17:37:26.831637 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:37:26 crc kubenswrapper[4812]: E1125 17:37:26.832140 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.404389 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.440505 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" event={"ID":"db712262-75ce-42f5-ab11-c34ca04cf37a","Type":"ContainerStarted","Data":"59aeda8a28e467dc8ef01731e7bc5c787aa583145c2d9424d047307bf4f0bd8f"} Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.440655 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.450224 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881","Type":"ContainerStarted","Data":"bd5ffdea50931bd87fa1123e68e12e3776f8470ba757a8f0a9fd203890c37813"} Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.454155 4812 generic.go:334] "Generic (PLEG): container finished" podID="842381e2-aa1c-4a72-9db3-51bffd277741" containerID="79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5" exitCode=0 Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.454222 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56b9b89f86-mb9q5" event={"ID":"842381e2-aa1c-4a72-9db3-51bffd277741","Type":"ContainerDied","Data":"79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5"} Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.456983 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"3748e190-c778-4c84-bd41-4b7dac8e569a","Type":"ContainerStarted","Data":"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5"} Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.457017 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"3748e190-c778-4c84-bd41-4b7dac8e569a","Type":"ContainerStarted","Data":"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc"} Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.457183 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.496612 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" podStartSLOduration=3.496593163 podStartE2EDuration="3.496593163s" podCreationTimestamp="2025-11-25 17:37:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:37:27.465273293 +0000 UTC m=+3022.305415388" watchObservedRunningTime="2025-11-25 17:37:27.496593163 +0000 UTC m=+3022.336735258" Nov 25 17:37:27 crc kubenswrapper[4812]: I1125 17:37:27.497479 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=2.497472517 podStartE2EDuration="2.497472517s" podCreationTimestamp="2025-11-25 17:37:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:37:27.48950222 +0000 UTC m=+3022.329644335" watchObservedRunningTime="2025-11-25 17:37:27.497472517 +0000 UTC m=+3022.337614612" Nov 25 17:37:28 crc kubenswrapper[4812]: I1125 17:37:28.489761 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881","Type":"ContainerStarted","Data":"328c33146a80d830475d93b5bdc173e65c9fd351d137600eade535a711dd1fb3"} Nov 25 17:37:28 crc kubenswrapper[4812]: I1125 17:37:28.490075 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api-log" containerID="cri-o://8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc" gracePeriod=30 Nov 25 17:37:28 crc kubenswrapper[4812]: I1125 17:37:28.490414 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api" containerID="cri-o://6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5" gracePeriod=30 Nov 25 17:37:28 crc kubenswrapper[4812]: I1125 17:37:28.529433 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.767525913 podStartE2EDuration="4.529417955s" podCreationTimestamp="2025-11-25 17:37:24 +0000 UTC" firstStartedPulling="2025-11-25 17:37:25.669417928 +0000 UTC m=+3020.509560023" lastFinishedPulling="2025-11-25 17:37:26.43130997 +0000 UTC m=+3021.271452065" observedRunningTime="2025-11-25 17:37:28.527922155 +0000 UTC m=+3023.368064250" watchObservedRunningTime="2025-11-25 17:37:28.529417955 +0000 UTC m=+3023.369560050" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.301317 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.248:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.248:8443: connect: connection refused" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.362125 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.499832 4812 generic.go:334] "Generic (PLEG): container finished" podID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerID="6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5" exitCode=0 Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.499863 4812 generic.go:334] "Generic (PLEG): container finished" podID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerID="8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc" exitCode=143 Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.500796 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.501284 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"3748e190-c778-4c84-bd41-4b7dac8e569a","Type":"ContainerDied","Data":"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5"} Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.501333 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"3748e190-c778-4c84-bd41-4b7dac8e569a","Type":"ContainerDied","Data":"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc"} Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.501350 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"3748e190-c778-4c84-bd41-4b7dac8e569a","Type":"ContainerDied","Data":"e6b41810822b9e15b24d7cef42678c69aad0958bbe2e215ba239162a5111c3ae"} Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.501367 4812 scope.go:117] "RemoveContainer" containerID="6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.526094 4812 scope.go:117] "RemoveContainer" containerID="8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530102 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-combined-ca-bundle\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530157 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfkqd\" (UniqueName: \"kubernetes.io/projected/3748e190-c778-4c84-bd41-4b7dac8e569a-kube-api-access-wfkqd\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530336 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530361 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3748e190-c778-4c84-bd41-4b7dac8e569a-logs\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530378 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-scripts\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530399 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data-custom\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530439 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3748e190-c778-4c84-bd41-4b7dac8e569a-etc-machine-id\") pod \"3748e190-c778-4c84-bd41-4b7dac8e569a\" (UID: \"3748e190-c778-4c84-bd41-4b7dac8e569a\") " Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.530937 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3748e190-c778-4c84-bd41-4b7dac8e569a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.531350 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3748e190-c778-4c84-bd41-4b7dac8e569a-logs" (OuterVolumeSpecName: "logs") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.540498 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-scripts" (OuterVolumeSpecName: "scripts") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.547920 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.561762 4812 scope.go:117] "RemoveContainer" containerID="6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.561772 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3748e190-c778-4c84-bd41-4b7dac8e569a-kube-api-access-wfkqd" (OuterVolumeSpecName: "kube-api-access-wfkqd") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "kube-api-access-wfkqd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: E1125 17:37:29.563691 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5\": container with ID starting with 6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5 not found: ID does not exist" containerID="6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.563751 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5"} err="failed to get container status \"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5\": rpc error: code = NotFound desc = could not find container \"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5\": container with ID starting with 6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5 not found: ID does not exist" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.563800 4812 scope.go:117] "RemoveContainer" containerID="8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc" Nov 25 17:37:29 crc kubenswrapper[4812]: E1125 17:37:29.568807 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc\": container with ID starting with 8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc not found: ID does not exist" containerID="8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.568860 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc"} err="failed to get container status \"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc\": rpc error: code = NotFound desc = could not find container \"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc\": container with ID starting with 8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc not found: ID does not exist" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.568893 4812 scope.go:117] "RemoveContainer" containerID="6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.569327 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.569404 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5"} err="failed to get container status \"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5\": rpc error: code = NotFound desc = could not find container \"6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5\": container with ID starting with 6fc9fe85adde81a8a4c26d57da999756782dfd35a5ffdfbb3b6e00162b6823f5 not found: ID does not exist" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.569440 4812 scope.go:117] "RemoveContainer" containerID="8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.569897 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc"} err="failed to get container status \"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc\": rpc error: code = NotFound desc = could not find container \"8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc\": container with ID starting with 8d2a6bff67bae8a13eb96817258d394303c2af38668800352bfac45ede3f9dfc not found: ID does not exist" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.598079 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data" (OuterVolumeSpecName: "config-data") pod "3748e190-c778-4c84-bd41-4b7dac8e569a" (UID: "3748e190-c778-4c84-bd41-4b7dac8e569a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634305 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634460 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfkqd\" (UniqueName: \"kubernetes.io/projected/3748e190-c778-4c84-bd41-4b7dac8e569a-kube-api-access-wfkqd\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634491 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634509 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3748e190-c778-4c84-bd41-4b7dac8e569a-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634601 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634621 4812 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3748e190-c778-4c84-bd41-4b7dac8e569a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.634643 4812 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3748e190-c778-4c84-bd41-4b7dac8e569a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.855786 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.860958 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.869160 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:29 crc kubenswrapper[4812]: E1125 17:37:29.869569 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.869589 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api" Nov 25 17:37:29 crc kubenswrapper[4812]: E1125 17:37:29.869611 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api-log" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.869618 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api-log" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.869785 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api-log" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.869815 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" containerName="manila-api" Nov 25 17:37:29 crc 
kubenswrapper[4812]: I1125 17:37:29.870765 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.872788 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.873165 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.873676 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Nov 25 17:37:29 crc kubenswrapper[4812]: I1125 17:37:29.921524 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041046 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-public-tls-certs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041091 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-config-data-custom\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041127 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-scripts\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041161 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-internal-tls-certs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041365 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-config-data\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041678 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9g6j\" (UniqueName: \"kubernetes.io/projected/6d2f6681-d1d6-45e9-b0f4-65209caf0069-kube-api-access-w9g6j\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041904 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6d2f6681-d1d6-45e9-b0f4-65209caf0069-etc-machine-id\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041964 4812 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.041990 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d2f6681-d1d6-45e9-b0f4-65209caf0069-logs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143707 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6d2f6681-d1d6-45e9-b0f4-65209caf0069-etc-machine-id\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143765 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143789 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d2f6681-d1d6-45e9-b0f4-65209caf0069-logs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143821 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-public-tls-certs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143844 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-config-data-custom\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143855 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6d2f6681-d1d6-45e9-b0f4-65209caf0069-etc-machine-id\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.143873 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-scripts\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.144004 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-internal-tls-certs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.144084 4812 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-config-data\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.144166 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9g6j\" (UniqueName: \"kubernetes.io/projected/6d2f6681-d1d6-45e9-b0f4-65209caf0069-kube-api-access-w9g6j\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.144240 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d2f6681-d1d6-45e9-b0f4-65209caf0069-logs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.152001 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-scripts\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.157232 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.157669 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-public-tls-certs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.158944 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-internal-tls-certs\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.159356 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-config-data-custom\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.171299 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2f6681-d1d6-45e9-b0f4-65209caf0069-config-data\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.175013 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9g6j\" (UniqueName: \"kubernetes.io/projected/6d2f6681-d1d6-45e9-b0f4-65209caf0069-kube-api-access-w9g6j\") pod \"manila-api-0\" (UID: \"6d2f6681-d1d6-45e9-b0f4-65209caf0069\") " pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.185763 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.592385 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.592939 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-central-agent" containerID="cri-o://28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4" gracePeriod=30 Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.593330 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="proxy-httpd" containerID="cri-o://61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066" gracePeriod=30 Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.593378 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="sg-core" containerID="cri-o://f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e" gracePeriod=30 Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.593408 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-notification-agent" containerID="cri-o://5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d" gracePeriod=30 Nov 25 17:37:30 crc kubenswrapper[4812]: E1125 17:37:30.685828 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9470a3ba_0dee_4d6e_8c8e_41d3a0875e7e.slice/crio-f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9470a3ba_0dee_4d6e_8c8e_41d3a0875e7e.slice/crio-conmon-f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:37:30 crc kubenswrapper[4812]: I1125 17:37:30.848695 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.543006 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"5abd8738ef53ae3369497cea768eabb4fec0fd6f4188cee175835715bd1d8558"} Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.543338 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"22effd14eb41b56cedbb2f04c8ef8e76f6d05ec5aa5b22c1cecea694be354fd0"} Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.576040 4812 generic.go:334] "Generic (PLEG): container finished" podID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerID="61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066" exitCode=0 Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.576091 4812 generic.go:334] "Generic (PLEG): container finished" podID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerID="f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e" exitCode=2 Nov 25 17:37:31 crc 
kubenswrapper[4812]: I1125 17:37:31.576099 4812 generic.go:334] "Generic (PLEG): container finished" podID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerID="28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4" exitCode=0 Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.576133 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerDied","Data":"61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066"} Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.576201 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerDied","Data":"f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e"} Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.576215 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerDied","Data":"28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4"} Nov 25 17:37:31 crc kubenswrapper[4812]: I1125 17:37:31.843742 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3748e190-c778-4c84-bd41-4b7dac8e569a" path="/var/lib/kubelet/pods/3748e190-c778-4c84-bd41-4b7dac8e569a/volumes" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.122872 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.213794 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76b5fdb995-t8l6h" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.294624 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fcqj5"] Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.296212 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerName="dnsmasq-dns" containerID="cri-o://8fed418639925d7655a7402531172fcd9c93199bbad5c44d7f59493b2890a4e0" gracePeriod=10 Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.630520 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"0c5bf9c6c7264bb69602a9bfe8754b6df89eb3c5b2fbc7e4de36f3a244f3f03e"} Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.631064 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.646158 4812 generic.go:334] "Generic (PLEG): container finished" podID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerID="8fed418639925d7655a7402531172fcd9c93199bbad5c44d7f59493b2890a4e0" exitCode=0 Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.646251 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" event={"ID":"ad724bc1-eeea-44a3-bd54-b2247235f111","Type":"ContainerDied","Data":"8fed418639925d7655a7402531172fcd9c93199bbad5c44d7f59493b2890a4e0"} Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.648000 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" 
event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"97fdcd5015b48b082dae89e627846b80454584f06e03362d366b106e7f145412"} Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.648043 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"44f99050a2f773dee431b5543ae0ed32099ff52da5207847b262d808f4249df2"} Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.674237 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=6.674101611 podStartE2EDuration="6.674101611s" podCreationTimestamp="2025-11-25 17:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:37:35.653463532 +0000 UTC m=+3030.493605627" watchObservedRunningTime="2025-11-25 17:37:35.674101611 +0000 UTC m=+3030.514243706" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.718119 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.1342822630000002 podStartE2EDuration="11.718096625s" podCreationTimestamp="2025-11-25 17:37:24 +0000 UTC" firstStartedPulling="2025-11-25 17:37:25.657910256 +0000 UTC m=+3020.498052351" lastFinishedPulling="2025-11-25 17:37:34.241724628 +0000 UTC m=+3029.081866713" observedRunningTime="2025-11-25 17:37:35.713243353 +0000 UTC m=+3030.553385448" watchObservedRunningTime="2025-11-25 17:37:35.718096625 +0000 UTC m=+3030.558238720" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.837052 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.894107 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-openstack-edpm-ipam\") pod \"ad724bc1-eeea-44a3-bd54-b2247235f111\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.894220 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-sb\") pod \"ad724bc1-eeea-44a3-bd54-b2247235f111\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.894266 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8g5kl\" (UniqueName: \"kubernetes.io/projected/ad724bc1-eeea-44a3-bd54-b2247235f111-kube-api-access-8g5kl\") pod \"ad724bc1-eeea-44a3-bd54-b2247235f111\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.894291 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-dns-svc\") pod \"ad724bc1-eeea-44a3-bd54-b2247235f111\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.894423 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-nb\") pod 
\"ad724bc1-eeea-44a3-bd54-b2247235f111\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.894443 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-config\") pod \"ad724bc1-eeea-44a3-bd54-b2247235f111\" (UID: \"ad724bc1-eeea-44a3-bd54-b2247235f111\") " Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.922356 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad724bc1-eeea-44a3-bd54-b2247235f111-kube-api-access-8g5kl" (OuterVolumeSpecName: "kube-api-access-8g5kl") pod "ad724bc1-eeea-44a3-bd54-b2247235f111" (UID: "ad724bc1-eeea-44a3-bd54-b2247235f111"). InnerVolumeSpecName "kube-api-access-8g5kl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.947924 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ad724bc1-eeea-44a3-bd54-b2247235f111" (UID: "ad724bc1-eeea-44a3-bd54-b2247235f111"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.960988 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-config" (OuterVolumeSpecName: "config") pod "ad724bc1-eeea-44a3-bd54-b2247235f111" (UID: "ad724bc1-eeea-44a3-bd54-b2247235f111"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.968697 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "ad724bc1-eeea-44a3-bd54-b2247235f111" (UID: "ad724bc1-eeea-44a3-bd54-b2247235f111"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.976638 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ad724bc1-eeea-44a3-bd54-b2247235f111" (UID: "ad724bc1-eeea-44a3-bd54-b2247235f111"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.987310 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ad724bc1-eeea-44a3-bd54-b2247235f111" (UID: "ad724bc1-eeea-44a3-bd54-b2247235f111"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.997218 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8g5kl\" (UniqueName: \"kubernetes.io/projected/ad724bc1-eeea-44a3-bd54-b2247235f111-kube-api-access-8g5kl\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.997263 4812 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.997277 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.997288 4812 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-config\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.997299 4812 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:35 crc kubenswrapper[4812]: I1125 17:37:35.997309 4812 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad724bc1-eeea-44a3-bd54-b2247235f111-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.657998 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="44f99050a2f773dee431b5543ae0ed32099ff52da5207847b262d808f4249df2" exitCode=1 Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.658102 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"44f99050a2f773dee431b5543ae0ed32099ff52da5207847b262d808f4249df2"} Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.658695 4812 scope.go:117] "RemoveContainer" containerID="44f99050a2f773dee431b5543ae0ed32099ff52da5207847b262d808f4249df2" Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.661323 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" event={"ID":"ad724bc1-eeea-44a3-bd54-b2247235f111","Type":"ContainerDied","Data":"007eb560793e9595c75aed8a8619ce291bf0285007765afdde297432d17fad00"} Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.661382 4812 scope.go:117] "RemoveContainer" containerID="8fed418639925d7655a7402531172fcd9c93199bbad5c44d7f59493b2890a4e0" Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.661396 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-fcqj5" Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.693838 4812 scope.go:117] "RemoveContainer" containerID="cf49f7d4f2ddbc75beb73b290492202c2d7123de28e4170f4ccb26e03f1bbf94" Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.709445 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fcqj5"] Nov 25 17:37:36 crc kubenswrapper[4812]: I1125 17:37:36.721236 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-fcqj5"] Nov 25 17:37:37 crc kubenswrapper[4812]: I1125 17:37:37.672308 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c"} Nov 25 17:37:37 crc kubenswrapper[4812]: I1125 17:37:37.844614 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" path="/var/lib/kubelet/pods/ad724bc1-eeea-44a3-bd54-b2247235f111/volumes" Nov 25 17:37:38 crc kubenswrapper[4812]: I1125 17:37:38.686546 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c" exitCode=1 Nov 25 17:37:38 crc kubenswrapper[4812]: I1125 17:37:38.686598 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c"} Nov 25 17:37:38 crc kubenswrapper[4812]: I1125 17:37:38.686692 4812 scope.go:117] "RemoveContainer" containerID="44f99050a2f773dee431b5543ae0ed32099ff52da5207847b262d808f4249df2" Nov 25 17:37:38 crc kubenswrapper[4812]: I1125 17:37:38.687225 4812 scope.go:117] "RemoveContainer" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c" Nov 25 17:37:38 crc kubenswrapper[4812]: E1125 17:37:38.687783 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:37:39 crc kubenswrapper[4812]: I1125 17:37:39.301186 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.248:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.248:8443: connect: connection refused" Nov 25 17:37:39 crc kubenswrapper[4812]: I1125 17:37:39.702090 4812 scope.go:117] "RemoveContainer" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c" Nov 25 17:37:39 crc kubenswrapper[4812]: E1125 17:37:39.702396 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.485567 4812 util.go:48] "No ready sandbox for 
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591197 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-ceilometer-tls-certs\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591278 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2z26\" (UniqueName: \"kubernetes.io/projected/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-kube-api-access-z2z26\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591316 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-config-data\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591365 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-sg-core-conf-yaml\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591457 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-scripts\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591491 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-run-httpd\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591523 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-combined-ca-bundle\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.591635 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-log-httpd\") pod \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\" (UID: \"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e\") "
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.592008 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.592361 4812 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.592464 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.600162 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-kube-api-access-z2z26" (OuterVolumeSpecName: "kube-api-access-z2z26") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "kube-api-access-z2z26". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.602041 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-scripts" (OuterVolumeSpecName: "scripts") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.632897 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.676662 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.693912 4812 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.693942 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2z26\" (UniqueName: \"kubernetes.io/projected/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-kube-api-access-z2z26\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.693954 4812 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.693964 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.693973 4812 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.697514 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.714284 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-config-data" (OuterVolumeSpecName: "config-data") pod "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" (UID: "9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.715568 4812 generic.go:334] "Generic (PLEG): container finished" podID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerID="5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d" exitCode=0
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.715602 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerDied","Data":"5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d"}
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.715633 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e","Type":"ContainerDied","Data":"9604ae0888ec7729826b5c679fb9c8c16a8c3234b13de2edca5f577e45fb868f"}
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.715636 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.715650 4812 scope.go:117] "RemoveContainer" containerID="61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.748605 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.752067 4812 scope.go:117] "RemoveContainer" containerID="f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.756108 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.773704 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.774204 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-notification-agent"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774224 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-notification-agent"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.774257 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerName="dnsmasq-dns"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774266 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerName="dnsmasq-dns"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.774296 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="sg-core"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774302 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="sg-core"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.774320 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-central-agent"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774326 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-central-agent"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.774334 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerName="init"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774350 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerName="init"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.774362 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="proxy-httpd"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774367 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="proxy-httpd"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774640 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad724bc1-eeea-44a3-bd54-b2247235f111" containerName="dnsmasq-dns"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774661 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-central-agent"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774673 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="sg-core"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774687 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="proxy-httpd"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.774695 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" containerName="ceilometer-notification-agent"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.779064 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.783660 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.783964 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.784183 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.795200 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.795225 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.806030 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.814709 4812 scope.go:117] "RemoveContainer" containerID="5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.837861 4812 scope.go:117] "RemoveContainer" containerID="28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.864031 4812 scope.go:117] "RemoveContainer" containerID="61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.864480 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066\": container with ID starting with 61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066 not found: ID does not exist" containerID="61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.864518 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066"} err="failed to get container status \"61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066\": rpc error: code = NotFound desc = could not find container \"61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066\": container with ID starting with 61c6bcb5896b79b6435f4372938772027e81000241f95fe81597275e78a8d066 not found: ID does not exist"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.864604 4812 scope.go:117] "RemoveContainer" containerID="f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.864903 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e\": container with ID starting with f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e not found: ID does not exist" containerID="f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.864924 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e"} err="failed to get container status \"f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e\": rpc error: code = NotFound desc = could not find container \"f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e\": container with ID starting with f322c6ae74b52c3736edb33ef2e48285044338720df862448e9abce33eb8d46e not found: ID does not exist"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.864939 4812 scope.go:117] "RemoveContainer" containerID="5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.865140 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d\": container with ID starting with 5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d not found: ID does not exist" containerID="5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.865162 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d"} err="failed to get container status \"5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d\": rpc error: code = NotFound desc = could not find container \"5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d\": container with ID starting with 5d8b3164f3054270c90c9cf8762d6a433edaaacf171506e9f33e17cf48ca200d not found: ID does not exist"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.865178 4812 scope.go:117] "RemoveContainer" containerID="28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4"
Nov 25 17:37:40 crc kubenswrapper[4812]: E1125 17:37:40.865788 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4\": container with ID starting with 28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4 not found: ID does not exist" containerID="28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.865815 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4"} err="failed to get container status \"28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4\": rpc error: code = NotFound desc = could not find container \"28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4\": container with ID starting with 28431f50b304ab9fdd0ad7a04533707dacbedb2e9d840520339092663c124fb4 not found: ID does not exist"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897350 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blkmv\" (UniqueName: \"kubernetes.io/projected/24d72c8f-af4a-4e0a-a148-0a5437c540b4-kube-api-access-blkmv\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897524 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897577 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897636 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24d72c8f-af4a-4e0a-a148-0a5437c540b4-run-httpd\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897668 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24d72c8f-af4a-4e0a-a148-0a5437c540b4-log-httpd\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897693 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897722 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-scripts\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.897746 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-config-data\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.999344 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-scripts\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.999390 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-config-data\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.999466 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blkmv\" (UniqueName: \"kubernetes.io/projected/24d72c8f-af4a-4e0a-a148-0a5437c540b4-kube-api-access-blkmv\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:40 crc kubenswrapper[4812]: I1125 17:37:40.999589 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.000042 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.000104 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24d72c8f-af4a-4e0a-a148-0a5437c540b4-run-httpd\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.000143 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24d72c8f-af4a-4e0a-a148-0a5437c540b4-log-httpd\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.000163 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.000557 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24d72c8f-af4a-4e0a-a148-0a5437c540b4-run-httpd\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.001159 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/24d72c8f-af4a-4e0a-a148-0a5437c540b4-log-httpd\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.004453 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.004566 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-scripts\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.005268 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.008312 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.008440 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24d72c8f-af4a-4e0a-a148-0a5437c540b4-config-data\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.020309 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blkmv\" (UniqueName: \"kubernetes.io/projected/24d72c8f-af4a-4e0a-a148-0a5437c540b4-kube-api-access-blkmv\") pod \"ceilometer-0\" (UID: \"24d72c8f-af4a-4e0a-a148-0a5437c540b4\") " pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.116037 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.650245 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 17:37:41 crc kubenswrapper[4812]: W1125 17:37:41.660689 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24d72c8f_af4a_4e0a_a148_0a5437c540b4.slice/crio-ebef554fbdf250a0d9fa37d0844f48c568868d71ef5a95eb81e9a303911032fd WatchSource:0}: Error finding container ebef554fbdf250a0d9fa37d0844f48c568868d71ef5a95eb81e9a303911032fd: Status 404 returned error can't find the container with id ebef554fbdf250a0d9fa37d0844f48c568868d71ef5a95eb81e9a303911032fd
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.731125 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"24d72c8f-af4a-4e0a-a148-0a5437c540b4","Type":"ContainerStarted","Data":"ebef554fbdf250a0d9fa37d0844f48c568868d71ef5a95eb81e9a303911032fd"}
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.833020 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd"
Nov 25 17:37:41 crc kubenswrapper[4812]: E1125 17:37:41.833511 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:37:41 crc kubenswrapper[4812]: I1125 17:37:41.845758 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e" path="/var/lib/kubelet/pods/9470a3ba-0dee-4d6e-8c8e-41d3a0875e7e/volumes"
Nov 25 17:37:42 crc kubenswrapper[4812]: I1125 17:37:42.741937 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"24d72c8f-af4a-4e0a-a148-0a5437c540b4","Type":"ContainerStarted","Data":"366bc9f6846239c11b0da41929a6472b8b0c9c43de027061eb4d9685a845b538"}
Nov 25 17:37:43 crc kubenswrapper[4812]: I1125 17:37:43.753817 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"24d72c8f-af4a-4e0a-a148-0a5437c540b4","Type":"ContainerStarted","Data":"687dd2b26c9f44bb32c49a51355fd18d659ad6dfc977cfd096a41546f6eda705"}
Nov 25 17:37:44 crc kubenswrapper[4812]: I1125 17:37:44.766445 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"24d72c8f-af4a-4e0a-a148-0a5437c540b4","Type":"ContainerStarted","Data":"8c9afdf9c7ceeefd1b171b005800847dd1d1f35f4c3baefef8d93acaf2bdd7f3"}
Nov 25 17:37:45 crc kubenswrapper[4812]: I1125 17:37:45.164024 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:37:45 crc kubenswrapper[4812]: I1125 17:37:45.164355 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:37:45 crc kubenswrapper[4812]: I1125 17:37:45.164372 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:37:45 crc kubenswrapper[4812]: I1125 17:37:45.165112 4812 scope.go:117] "RemoveContainer" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c"
Nov 25 17:37:45 crc kubenswrapper[4812]: E1125 17:37:45.165407 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:37:45 crc kubenswrapper[4812]: I1125 17:37:45.778062 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"24d72c8f-af4a-4e0a-a148-0a5437c540b4","Type":"ContainerStarted","Data":"e6683c9d1f8755e2ac58aab06ffa78dffc18824d5eca1decc3c86fc80a2945cf"}
Nov 25 17:37:45 crc kubenswrapper[4812]: I1125 17:37:45.778854 4812 scope.go:117] "RemoveContainer" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c"
Nov 25 17:37:45 crc kubenswrapper[4812]: E1125 17:37:45.779264 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:37:46 crc kubenswrapper[4812]: I1125 17:37:46.713175 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0"
Nov 25 17:37:46 crc kubenswrapper[4812]: I1125 17:37:46.748516 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.382967795 podStartE2EDuration="6.748494207s" podCreationTimestamp="2025-11-25 17:37:40 +0000 UTC" firstStartedPulling="2025-11-25 17:37:41.665082886 +0000 UTC m=+3036.505224981" lastFinishedPulling="2025-11-25 17:37:45.030609288 +0000 UTC m=+3039.870751393" observedRunningTime="2025-11-25 17:37:45.8165121 +0000 UTC m=+3040.656654195" watchObservedRunningTime="2025-11-25 17:37:46.748494207 +0000 UTC m=+3041.588636312"
Nov 25 17:37:46 crc kubenswrapper[4812]: I1125 17:37:46.786879 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 17:37:46 crc kubenswrapper[4812]: I1125 17:37:46.796149 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 25 17:37:46 crc kubenswrapper[4812]: I1125 17:37:46.796434 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="manila-scheduler" containerID="cri-o://bd5ffdea50931bd87fa1123e68e12e3776f8470ba757a8f0a9fd203890c37813" gracePeriod=30
Nov 25 17:37:46 crc kubenswrapper[4812]: I1125 17:37:46.796523 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="probe" containerID="cri-o://328c33146a80d830475d93b5bdc173e65c9fd351d137600eade535a711dd1fb3" gracePeriod=30
Nov 25 17:37:47 crc kubenswrapper[4812]: I1125 17:37:47.797401 4812 generic.go:334] "Generic (PLEG): container finished" podID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerID="328c33146a80d830475d93b5bdc173e65c9fd351d137600eade535a711dd1fb3" exitCode=0
Nov 25 17:37:47 crc kubenswrapper[4812]: I1125 17:37:47.797447 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881","Type":"ContainerDied","Data":"328c33146a80d830475d93b5bdc173e65c9fd351d137600eade535a711dd1fb3"}
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:48.823447 4812 generic.go:334] "Generic (PLEG): container finished" podID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerID="bd5ffdea50931bd87fa1123e68e12e3776f8470ba757a8f0a9fd203890c37813" exitCode=0
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:48.823833 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881","Type":"ContainerDied","Data":"bd5ffdea50931bd87fa1123e68e12e3776f8470ba757a8f0a9fd203890c37813"}
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.034891 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095205 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data-custom\") pod \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") "
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095661 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-etc-machine-id\") pod \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") "
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095770 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" (UID: "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095784 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data\") pod \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095828 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-scripts\") pod \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095868 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shjlb\" (UniqueName: \"kubernetes.io/projected/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-kube-api-access-shjlb\") pod \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.095890 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-combined-ca-bundle\") pod \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\" (UID: \"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881\") " Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.096456 4812 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.114007 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-kube-api-access-shjlb" (OuterVolumeSpecName: "kube-api-access-shjlb") pod "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" (UID: "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881"). InnerVolumeSpecName "kube-api-access-shjlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.114148 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" (UID: "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.116697 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-scripts" (OuterVolumeSpecName: "scripts") pod "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" (UID: "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.167862 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" (UID: "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.198618 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.198652 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shjlb\" (UniqueName: \"kubernetes.io/projected/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-kube-api-access-shjlb\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.198665 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.198676 4812 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.218913 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data" (OuterVolumeSpecName: "config-data") pod "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" (UID: "5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.300688 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.301226 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-56b9b89f86-mb9q5" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.248:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.248:8443: connect: connection refused" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.301331 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.844151 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881","Type":"ContainerDied","Data":"b30e254363dd7157fa014893dc73ad667891de79a8efc32b5e955604fa256c12"} Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.844209 4812 scope.go:117] "RemoveContainer" containerID="328c33146a80d830475d93b5bdc173e65c9fd351d137600eade535a711dd1fb3" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.844256 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.921880 4812 scope.go:117] "RemoveContainer" containerID="bd5ffdea50931bd87fa1123e68e12e3776f8470ba757a8f0a9fd203890c37813" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.934585 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.957468 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.957544 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:49 crc kubenswrapper[4812]: E1125 17:37:49.957836 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="manila-scheduler" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.957848 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="manila-scheduler" Nov 25 17:37:49 crc kubenswrapper[4812]: E1125 17:37:49.957884 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="probe" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.957889 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="probe" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.958075 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="manila-scheduler" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.958088 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" containerName="probe" Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.958941 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.959010 4812 util.go:30] "No sandbox for pod can be found. 
Nov 25 17:37:49 crc kubenswrapper[4812]: I1125 17:37:49.969235 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.016235 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c79f81a1-3a91-41f6-9901-8243dc28537e-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.016573 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vd8r\" (UniqueName: \"kubernetes.io/projected/c79f81a1-3a91-41f6-9901-8243dc28537e-kube-api-access-9vd8r\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.016758 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.016912 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.017027 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-scripts\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.017130 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-config-data\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123022 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c79f81a1-3a91-41f6-9901-8243dc28537e-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123101 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vd8r\" (UniqueName: \"kubernetes.io/projected/c79f81a1-3a91-41f6-9901-8243dc28537e-kube-api-access-9vd8r\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123153 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123157 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c79f81a1-3a91-41f6-9901-8243dc28537e-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123243 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123289 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-scripts\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.123323 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-config-data\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.141196 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-scripts\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.142456 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.142759 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-config-data\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.146909 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c79f81a1-3a91-41f6-9901-8243dc28537e-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.153841 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vd8r\" (UniqueName: \"kubernetes.io/projected/c79f81a1-3a91-41f6-9901-8243dc28537e-kube-api-access-9vd8r\") pod \"manila-scheduler-0\" (UID: \"c79f81a1-3a91-41f6-9901-8243dc28537e\") " pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.295094 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.765698 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 25 17:37:50 crc kubenswrapper[4812]: I1125 17:37:50.859010 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c79f81a1-3a91-41f6-9901-8243dc28537e","Type":"ContainerStarted","Data":"e623463057b19bf30d307c5df1ad3179bbbf72c6b04b9d7dd58ce04d5d1c36e8"}
Nov 25 17:37:51 crc kubenswrapper[4812]: I1125 17:37:51.165143 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:37:51 crc kubenswrapper[4812]: I1125 17:37:51.206965 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:37:51 crc kubenswrapper[4812]: I1125 17:37:51.843645 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881" path="/var/lib/kubelet/pods/5ffb9542-2fd7-4d38-8b11-ab8fc1fd9881/volumes"
Nov 25 17:37:51 crc kubenswrapper[4812]: I1125 17:37:51.868883 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c79f81a1-3a91-41f6-9901-8243dc28537e","Type":"ContainerStarted","Data":"ed4e7d8409c9715bd66c83226c051ed16315bcb8ccce213e7a13ade10390f788"}
Nov 25 17:37:51 crc kubenswrapper[4812]: I1125 17:37:51.868925 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"c79f81a1-3a91-41f6-9901-8243dc28537e","Type":"ContainerStarted","Data":"0caeb305606350bb797ce226d3d7e02f4dd54e5dfdf73b6bfa4ab662a2d20b7c"}
Nov 25 17:37:51 crc kubenswrapper[4812]: I1125 17:37:51.902755 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=2.9027378600000002 podStartE2EDuration="2.90273786s" podCreationTimestamp="2025-11-25 17:37:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 17:37:51.892389929 +0000 UTC m=+3046.732532044" watchObservedRunningTime="2025-11-25 17:37:51.90273786 +0000 UTC m=+3046.742879955"
Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.789564 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56b9b89f86-mb9q5"
Need to start a new one" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.900082 4812 generic.go:334] "Generic (PLEG): container finished" podID="842381e2-aa1c-4a72-9db3-51bffd277741" containerID="dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60" exitCode=137 Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.900125 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56b9b89f86-mb9q5" event={"ID":"842381e2-aa1c-4a72-9db3-51bffd277741","Type":"ContainerDied","Data":"dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60"} Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.900150 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56b9b89f86-mb9q5" event={"ID":"842381e2-aa1c-4a72-9db3-51bffd277741","Type":"ContainerDied","Data":"9f85af0737f18e61a05991fab55904ee941745c4b8b505b847a808d8fa0c5ca4"} Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.900166 4812 scope.go:117] "RemoveContainer" containerID="79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.900725 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56b9b89f86-mb9q5" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901550 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/842381e2-aa1c-4a72-9db3-51bffd277741-logs\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901611 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-secret-key\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901665 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-combined-ca-bundle\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901720 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xqvv\" (UniqueName: \"kubernetes.io/projected/842381e2-aa1c-4a72-9db3-51bffd277741-kube-api-access-8xqvv\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901759 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-tls-certs\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901801 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-config-data\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.901907 4812 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-scripts\") pod \"842381e2-aa1c-4a72-9db3-51bffd277741\" (UID: \"842381e2-aa1c-4a72-9db3-51bffd277741\") " Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.902744 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/842381e2-aa1c-4a72-9db3-51bffd277741-logs" (OuterVolumeSpecName: "logs") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.910611 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.915987 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/842381e2-aa1c-4a72-9db3-51bffd277741-kube-api-access-8xqvv" (OuterVolumeSpecName: "kube-api-access-8xqvv") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "kube-api-access-8xqvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.935180 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-config-data" (OuterVolumeSpecName: "config-data") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.938794 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-scripts" (OuterVolumeSpecName: "scripts") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.943667 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:53 crc kubenswrapper[4812]: I1125 17:37:53.962196 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "842381e2-aa1c-4a72-9db3-51bffd277741" (UID: "842381e2-aa1c-4a72-9db3-51bffd277741"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.004420 4812 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/842381e2-aa1c-4a72-9db3-51bffd277741-logs\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.006412 4812 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.006520 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.006620 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xqvv\" (UniqueName: \"kubernetes.io/projected/842381e2-aa1c-4a72-9db3-51bffd277741-kube-api-access-8xqvv\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.006689 4812 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/842381e2-aa1c-4a72-9db3-51bffd277741-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.006755 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.006840 4812 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/842381e2-aa1c-4a72-9db3-51bffd277741-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.081518 4812 scope.go:117] "RemoveContainer" containerID="dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.099146 4812 scope.go:117] "RemoveContainer" containerID="79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5" Nov 25 17:37:54 crc kubenswrapper[4812]: E1125 17:37:54.099664 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5\": container with ID starting with 79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5 not found: ID does not exist" containerID="79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.099711 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5"} err="failed to get container status \"79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5\": rpc error: code = NotFound desc = could not find container \"79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5\": container with ID starting with 79aabd3f78f8666bdd980605f49b9f4c43c64e4d9069224cc6717d0fb00878f5 not found: ID does not exist" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.099740 4812 scope.go:117] "RemoveContainer" containerID="dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60" Nov 25 17:37:54 crc kubenswrapper[4812]: 
E1125 17:37:54.100118 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60\": container with ID starting with dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60 not found: ID does not exist" containerID="dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.100162 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60"} err="failed to get container status \"dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60\": rpc error: code = NotFound desc = could not find container \"dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60\": container with ID starting with dc11d24fa10df7ada01e58433293390cdba4b7a79b7c797d3b8bb4f3f5b24c60 not found: ID does not exist" Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.245051 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-56b9b89f86-mb9q5"] Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.261743 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-56b9b89f86-mb9q5"] Nov 25 17:37:54 crc kubenswrapper[4812]: I1125 17:37:54.832234 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:37:54 crc kubenswrapper[4812]: E1125 17:37:54.832777 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:37:55 crc kubenswrapper[4812]: I1125 17:37:55.842061 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" path="/var/lib/kubelet/pods/842381e2-aa1c-4a72-9db3-51bffd277741/volumes" Nov 25 17:38:00 crc kubenswrapper[4812]: I1125 17:38:00.295331 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 25 17:38:00 crc kubenswrapper[4812]: I1125 17:38:00.832042 4812 scope.go:117] "RemoveContainer" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c" Nov 25 17:38:01 crc kubenswrapper[4812]: I1125 17:38:01.203788 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:01 crc kubenswrapper[4812]: I1125 17:38:01.207139 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:02 crc kubenswrapper[4812]: I1125 17:38:02.001818 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d"} Nov 25 17:38:03 crc kubenswrapper[4812]: I1125 
17:38:03.019573 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d" exitCode=1 Nov 25 17:38:03 crc kubenswrapper[4812]: I1125 17:38:03.019600 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d"} Nov 25 17:38:03 crc kubenswrapper[4812]: I1125 17:38:03.021228 4812 scope.go:117] "RemoveContainer" containerID="fcee6a1e99e0314c5643be1ff9987a4421641277fcd013b16340ed50b373f09c" Nov 25 17:38:03 crc kubenswrapper[4812]: I1125 17:38:03.023505 4812 scope.go:117] "RemoveContainer" containerID="cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d" Nov 25 17:38:03 crc kubenswrapper[4812]: E1125 17:38:03.024170 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:38:05 crc kubenswrapper[4812]: I1125 17:38:05.164132 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:38:05 crc kubenswrapper[4812]: I1125 17:38:05.164515 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:38:05 crc kubenswrapper[4812]: I1125 17:38:05.164553 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:38:05 crc kubenswrapper[4812]: I1125 17:38:05.165369 4812 scope.go:117] "RemoveContainer" containerID="cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d" Nov 25 17:38:05 crc kubenswrapper[4812]: E1125 17:38:05.165711 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:38:05 crc kubenswrapper[4812]: I1125 17:38:05.839756 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:38:05 crc kubenswrapper[4812]: E1125 17:38:05.840054 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:38:10 crc kubenswrapper[4812]: I1125 17:38:10.196987 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:10 crc kubenswrapper[4812]: I1125 17:38:10.197810 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 
17:38:10 crc kubenswrapper[4812]: I1125 17:38:10.199130 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"0c5bf9c6c7264bb69602a9bfe8754b6df89eb3c5b2fbc7e4de36f3a244f3f03e"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:38:10 crc kubenswrapper[4812]: I1125 17:38:10.199195 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://0c5bf9c6c7264bb69602a9bfe8754b6df89eb3c5b2fbc7e4de36f3a244f3f03e" gracePeriod=30 Nov 25 17:38:10 crc kubenswrapper[4812]: I1125 17:38:10.201799 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:10 crc kubenswrapper[4812]: I1125 17:38:10.205202 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:38:11 crc kubenswrapper[4812]: I1125 17:38:11.123302 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 17:38:11 crc kubenswrapper[4812]: I1125 17:38:11.767732 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 25 17:38:14 crc kubenswrapper[4812]: I1125 17:38:14.147164 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="0c5bf9c6c7264bb69602a9bfe8754b6df89eb3c5b2fbc7e4de36f3a244f3f03e" exitCode=0 Nov 25 17:38:14 crc kubenswrapper[4812]: I1125 17:38:14.147251 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"0c5bf9c6c7264bb69602a9bfe8754b6df89eb3c5b2fbc7e4de36f3a244f3f03e"} Nov 25 17:38:14 crc kubenswrapper[4812]: I1125 17:38:14.147792 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"93dc4e01b8ccbc13e583ff20c3188992c8cdac9236c411a0c69d6fdd1680a804"} Nov 25 17:38:14 crc kubenswrapper[4812]: I1125 17:38:14.148004 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:38:19 crc kubenswrapper[4812]: I1125 17:38:19.830965 4812 scope.go:117] "RemoveContainer" containerID="cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d" Nov 25 17:38:19 crc kubenswrapper[4812]: E1125 17:38:19.831824 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:38:20 crc kubenswrapper[4812]: I1125 17:38:20.832588 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:38:20 crc kubenswrapper[4812]: E1125 17:38:20.833493 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:38:30 crc kubenswrapper[4812]: I1125 17:38:30.831909 4812 scope.go:117] "RemoveContainer" containerID="cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d" Nov 25 17:38:31 crc kubenswrapper[4812]: I1125 17:38:31.218349 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:31 crc kubenswrapper[4812]: I1125 17:38:31.272402 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:31 crc kubenswrapper[4812]: I1125 17:38:31.837444 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:38:31 crc kubenswrapper[4812]: E1125 17:38:31.837915 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:38:32 crc kubenswrapper[4812]: I1125 17:38:32.362548 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950"} Nov 25 17:38:33 crc kubenswrapper[4812]: I1125 17:38:33.381659 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" exitCode=1 Nov 25 17:38:33 crc kubenswrapper[4812]: I1125 17:38:33.381744 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950"} Nov 25 17:38:33 crc kubenswrapper[4812]: I1125 17:38:33.381886 4812 scope.go:117] "RemoveContainer" containerID="cd1cc6ad6ff39981a45ce046a2b94df610c0a494ce9f7ee0ad2e248a5c9cf12d" Nov 25 17:38:33 crc kubenswrapper[4812]: I1125 17:38:33.383282 4812 scope.go:117] "RemoveContainer" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" Nov 25 17:38:33 crc kubenswrapper[4812]: E1125 17:38:33.384004 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:38:35 crc kubenswrapper[4812]: I1125 17:38:35.164598 4812 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:38:35 crc kubenswrapper[4812]: I1125 17:38:35.165415 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:38:35 crc kubenswrapper[4812]: I1125 17:38:35.166596 4812 scope.go:117] "RemoveContainer" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" Nov 25 17:38:35 crc kubenswrapper[4812]: E1125 17:38:35.167315 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:38:41 crc kubenswrapper[4812]: I1125 17:38:41.140776 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:41 crc kubenswrapper[4812]: I1125 17:38:41.271913 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:44 crc kubenswrapper[4812]: I1125 17:38:44.832276 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:38:44 crc kubenswrapper[4812]: E1125 17:38:44.833130 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:38:45 crc kubenswrapper[4812]: I1125 17:38:45.164842 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:38:45 crc kubenswrapper[4812]: I1125 17:38:45.166057 4812 scope.go:117] "RemoveContainer" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" Nov 25 17:38:45 crc kubenswrapper[4812]: E1125 17:38:45.166746 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:38:50 crc kubenswrapper[4812]: I1125 17:38:50.191905 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:50 crc kubenswrapper[4812]: I1125 17:38:50.191920 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:38:50 crc kubenswrapper[4812]: I1125 17:38:50.192425 4812 kubelet.go:2542] "SyncLoop 
(probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:38:50 crc kubenswrapper[4812]: I1125 17:38:50.193248 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"93dc4e01b8ccbc13e583ff20c3188992c8cdac9236c411a0c69d6fdd1680a804"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:38:50 crc kubenswrapper[4812]: I1125 17:38:50.193289 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://93dc4e01b8ccbc13e583ff20c3188992c8cdac9236c411a0c69d6fdd1680a804" gracePeriod=30 Nov 25 17:38:50 crc kubenswrapper[4812]: I1125 17:38:50.200193 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:38:53 crc kubenswrapper[4812]: I1125 17:38:53.642960 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="93dc4e01b8ccbc13e583ff20c3188992c8cdac9236c411a0c69d6fdd1680a804" exitCode=0 Nov 25 17:38:53 crc kubenswrapper[4812]: I1125 17:38:53.643021 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"93dc4e01b8ccbc13e583ff20c3188992c8cdac9236c411a0c69d6fdd1680a804"} Nov 25 17:38:53 crc kubenswrapper[4812]: I1125 17:38:53.643400 4812 scope.go:117] "RemoveContainer" containerID="0c5bf9c6c7264bb69602a9bfe8754b6df89eb3c5b2fbc7e4de36f3a244f3f03e" Nov 25 17:38:54 crc kubenswrapper[4812]: I1125 17:38:54.660379 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"fe365c92a41c60c2584c96a3674bda4ae40edb18c11513144e411804e37557d1"} Nov 25 17:38:54 crc kubenswrapper[4812]: I1125 17:38:54.660815 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:38:55 crc kubenswrapper[4812]: I1125 17:38:55.837575 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:38:55 crc kubenswrapper[4812]: E1125 17:38:55.838041 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:38:59 crc kubenswrapper[4812]: I1125 17:38:59.833805 4812 scope.go:117] "RemoveContainer" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" Nov 25 17:38:59 crc kubenswrapper[4812]: E1125 17:38:59.834796 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 
17:39:09 crc kubenswrapper[4812]: I1125 17:39:09.832439 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:39:09 crc kubenswrapper[4812]: E1125 17:39:09.833252 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:39:11 crc kubenswrapper[4812]: I1125 17:39:11.232989 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:11 crc kubenswrapper[4812]: I1125 17:39:11.283916 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:13 crc kubenswrapper[4812]: I1125 17:39:13.833228 4812 scope.go:117] "RemoveContainer" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" Nov 25 17:39:14 crc kubenswrapper[4812]: I1125 17:39:14.890356 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094"} Nov 25 17:39:15 crc kubenswrapper[4812]: I1125 17:39:15.164343 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:39:15 crc kubenswrapper[4812]: I1125 17:39:15.900737 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" exitCode=1 Nov 25 17:39:15 crc kubenswrapper[4812]: I1125 17:39:15.900833 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094"} Nov 25 17:39:15 crc kubenswrapper[4812]: I1125 17:39:15.901093 4812 scope.go:117] "RemoveContainer" containerID="eca197e0c075a001b93e7a906f5853d9117e9cdfda18ab2312007510daf6d950" Nov 25 17:39:15 crc kubenswrapper[4812]: I1125 17:39:15.901573 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:39:15 crc kubenswrapper[4812]: E1125 17:39:15.902131 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:39:16 crc kubenswrapper[4812]: I1125 17:39:16.912515 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:39:16 crc kubenswrapper[4812]: E1125 17:39:16.912858 4812 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:39:21 crc kubenswrapper[4812]: I1125 17:39:21.235812 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:21 crc kubenswrapper[4812]: I1125 17:39:21.249285 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:22 crc kubenswrapper[4812]: I1125 17:39:22.831621 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:39:22 crc kubenswrapper[4812]: E1125 17:39:22.832371 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:39:25 crc kubenswrapper[4812]: I1125 17:39:25.164277 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:39:25 crc kubenswrapper[4812]: I1125 17:39:25.164753 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:39:25 crc kubenswrapper[4812]: I1125 17:39:25.165761 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:39:25 crc kubenswrapper[4812]: E1125 17:39:25.166352 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:39:30 crc kubenswrapper[4812]: I1125 17:39:30.199012 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:30 crc kubenswrapper[4812]: I1125 17:39:30.199074 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:30 crc kubenswrapper[4812]: I1125 17:39:30.199830 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:39:30 crc kubenswrapper[4812]: I1125 17:39:30.201049 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"fe365c92a41c60c2584c96a3674bda4ae40edb18c11513144e411804e37557d1"} pod="openstack/manila-api-0" 
containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:39:30 crc kubenswrapper[4812]: I1125 17:39:30.201113 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://fe365c92a41c60c2584c96a3674bda4ae40edb18c11513144e411804e37557d1" gracePeriod=30 Nov 25 17:39:30 crc kubenswrapper[4812]: I1125 17:39:30.208481 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:33 crc kubenswrapper[4812]: I1125 17:39:33.831933 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:39:33 crc kubenswrapper[4812]: E1125 17:39:33.832836 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:39:34 crc kubenswrapper[4812]: I1125 17:39:34.089435 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="fe365c92a41c60c2584c96a3674bda4ae40edb18c11513144e411804e37557d1" exitCode=0 Nov 25 17:39:34 crc kubenswrapper[4812]: I1125 17:39:34.089475 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"fe365c92a41c60c2584c96a3674bda4ae40edb18c11513144e411804e37557d1"} Nov 25 17:39:34 crc kubenswrapper[4812]: I1125 17:39:34.089503 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"c57f8b3d554f7f6d1d99db403efdb5bc99287baad417ba59b7c8db2281aa8721"} Nov 25 17:39:34 crc kubenswrapper[4812]: I1125 17:39:34.089518 4812 scope.go:117] "RemoveContainer" containerID="93dc4e01b8ccbc13e583ff20c3188992c8cdac9236c411a0c69d6fdd1680a804" Nov 25 17:39:34 crc kubenswrapper[4812]: I1125 17:39:34.090092 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:39:37 crc kubenswrapper[4812]: I1125 17:39:37.832512 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:39:37 crc kubenswrapper[4812]: E1125 17:39:37.834154 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:39:45 crc kubenswrapper[4812]: I1125 17:39:45.844043 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:39:45 crc kubenswrapper[4812]: E1125 17:39:45.846181 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:39:49 crc kubenswrapper[4812]: I1125 17:39:49.832277 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:39:49 crc kubenswrapper[4812]: E1125 17:39:49.833152 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:39:51 crc kubenswrapper[4812]: I1125 17:39:51.221561 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:51 crc kubenswrapper[4812]: I1125 17:39:51.272969 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:39:56 crc kubenswrapper[4812]: I1125 17:39:56.832140 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:39:56 crc kubenswrapper[4812]: E1125 17:39:56.833077 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:40:00 crc kubenswrapper[4812]: I1125 17:40:00.832616 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:40:00 crc kubenswrapper[4812]: E1125 17:40:00.833686 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:01 crc kubenswrapper[4812]: I1125 17:40:01.148880 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:01 crc kubenswrapper[4812]: I1125 17:40:01.206699 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:07 crc kubenswrapper[4812]: I1125 17:40:07.832432 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:40:07 crc kubenswrapper[4812]: 
E1125 17:40:07.833601 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:40:10 crc kubenswrapper[4812]: I1125 17:40:10.196201 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:10 crc kubenswrapper[4812]: I1125 17:40:10.196626 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:40:10 crc kubenswrapper[4812]: I1125 17:40:10.196361 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:10 crc kubenswrapper[4812]: I1125 17:40:10.197919 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"c57f8b3d554f7f6d1d99db403efdb5bc99287baad417ba59b7c8db2281aa8721"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:40:10 crc kubenswrapper[4812]: I1125 17:40:10.197985 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://c57f8b3d554f7f6d1d99db403efdb5bc99287baad417ba59b7c8db2281aa8721" gracePeriod=30 Nov 25 17:40:10 crc kubenswrapper[4812]: I1125 17:40:10.204974 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:13 crc kubenswrapper[4812]: I1125 17:40:13.536982 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="c57f8b3d554f7f6d1d99db403efdb5bc99287baad417ba59b7c8db2281aa8721" exitCode=0 Nov 25 17:40:13 crc kubenswrapper[4812]: I1125 17:40:13.537039 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"c57f8b3d554f7f6d1d99db403efdb5bc99287baad417ba59b7c8db2281aa8721"} Nov 25 17:40:13 crc kubenswrapper[4812]: I1125 17:40:13.537604 4812 scope.go:117] "RemoveContainer" containerID="fe365c92a41c60c2584c96a3674bda4ae40edb18c11513144e411804e37557d1" Nov 25 17:40:14 crc kubenswrapper[4812]: I1125 17:40:14.550040 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"dd24f8e4f8dcd74528afeea4ab3b8f53a73b5675b39c468125f25e43c8ab09b3"} Nov 25 17:40:14 crc kubenswrapper[4812]: I1125 17:40:14.550601 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:40:15 crc kubenswrapper[4812]: I1125 17:40:15.856104 4812 scope.go:117] "RemoveContainer" 
containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:40:15 crc kubenswrapper[4812]: E1125 17:40:15.859349 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:19 crc kubenswrapper[4812]: I1125 17:40:19.831786 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:40:19 crc kubenswrapper[4812]: E1125 17:40:19.832470 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:40:29 crc kubenswrapper[4812]: I1125 17:40:29.849350 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:40:29 crc kubenswrapper[4812]: E1125 17:40:29.851171 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:31 crc kubenswrapper[4812]: I1125 17:40:31.193353 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:31 crc kubenswrapper[4812]: I1125 17:40:31.195852 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:32 crc kubenswrapper[4812]: I1125 17:40:32.832195 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:40:32 crc kubenswrapper[4812]: E1125 17:40:32.833244 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:40:41 crc kubenswrapper[4812]: I1125 17:40:41.117493 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:41 crc kubenswrapper[4812]: I1125 17:40:41.175203 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" 
output="HTTP probe failed with statuscode: 500" Nov 25 17:40:41 crc kubenswrapper[4812]: I1125 17:40:41.831759 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:40:42 crc kubenswrapper[4812]: I1125 17:40:42.856965 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a"} Nov 25 17:40:43 crc kubenswrapper[4812]: I1125 17:40:43.869884 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" exitCode=1 Nov 25 17:40:43 crc kubenswrapper[4812]: I1125 17:40:43.869950 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a"} Nov 25 17:40:43 crc kubenswrapper[4812]: I1125 17:40:43.870373 4812 scope.go:117] "RemoveContainer" containerID="1f8c1c8549be500753878a138c1df749bb63ad56c35e70a0256a7be0cec3d094" Nov 25 17:40:43 crc kubenswrapper[4812]: I1125 17:40:43.871555 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:40:43 crc kubenswrapper[4812]: E1125 17:40:43.871856 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:44 crc kubenswrapper[4812]: I1125 17:40:44.832574 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:40:44 crc kubenswrapper[4812]: E1125 17:40:44.833433 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:40:45 crc kubenswrapper[4812]: I1125 17:40:45.164451 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:40:45 crc kubenswrapper[4812]: I1125 17:40:45.164489 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:40:45 crc kubenswrapper[4812]: I1125 17:40:45.164569 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:40:45 crc kubenswrapper[4812]: I1125 17:40:45.164880 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:40:45 crc kubenswrapper[4812]: E1125 17:40:45.165692 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share 
pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:45 crc kubenswrapper[4812]: I1125 17:40:45.897254 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:40:45 crc kubenswrapper[4812]: E1125 17:40:45.897974 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:50 crc kubenswrapper[4812]: I1125 17:40:50.195443 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:50 crc kubenswrapper[4812]: I1125 17:40:50.198327 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:50 crc kubenswrapper[4812]: I1125 17:40:50.198420 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:40:50 crc kubenswrapper[4812]: I1125 17:40:50.199454 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"dd24f8e4f8dcd74528afeea4ab3b8f53a73b5675b39c468125f25e43c8ab09b3"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:40:50 crc kubenswrapper[4812]: I1125 17:40:50.199506 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://dd24f8e4f8dcd74528afeea4ab3b8f53a73b5675b39c468125f25e43c8ab09b3" gracePeriod=30 Nov 25 17:40:50 crc kubenswrapper[4812]: I1125 17:40:50.206096 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:40:53 crc kubenswrapper[4812]: I1125 17:40:53.986456 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="dd24f8e4f8dcd74528afeea4ab3b8f53a73b5675b39c468125f25e43c8ab09b3" exitCode=0 Nov 25 17:40:53 crc kubenswrapper[4812]: I1125 17:40:53.986544 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"dd24f8e4f8dcd74528afeea4ab3b8f53a73b5675b39c468125f25e43c8ab09b3"} Nov 25 17:40:53 crc kubenswrapper[4812]: I1125 17:40:53.986870 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1"} Nov 25 17:40:53 crc kubenswrapper[4812]: I1125 17:40:53.986896 4812 scope.go:117] "RemoveContainer" containerID="c57f8b3d554f7f6d1d99db403efdb5bc99287baad417ba59b7c8db2281aa8721" Nov 25 
17:40:53 crc kubenswrapper[4812]: I1125 17:40:53.987045 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:40:56 crc kubenswrapper[4812]: I1125 17:40:56.832207 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:40:56 crc kubenswrapper[4812]: E1125 17:40:56.833284 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:40:58 crc kubenswrapper[4812]: I1125 17:40:58.831856 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:40:58 crc kubenswrapper[4812]: E1125 17:40:58.832431 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:41:08 crc kubenswrapper[4812]: I1125 17:41:08.832601 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:41:08 crc kubenswrapper[4812]: E1125 17:41:08.833645 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:41:11 crc kubenswrapper[4812]: I1125 17:41:11.248659 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:41:11 crc kubenswrapper[4812]: I1125 17:41:11.376771 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:41:11 crc kubenswrapper[4812]: I1125 17:41:11.835953 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:41:11 crc kubenswrapper[4812]: E1125 17:41:11.837166 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:41:20 crc kubenswrapper[4812]: I1125 17:41:20.831557 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:41:20 crc kubenswrapper[4812]: E1125 17:41:20.832759 4812 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:41:21 crc kubenswrapper[4812]: I1125 17:41:21.220053 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:41:21 crc kubenswrapper[4812]: I1125 17:41:21.250691 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:41:22 crc kubenswrapper[4812]: I1125 17:41:22.832841 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:41:22 crc kubenswrapper[4812]: E1125 17:41:22.833971 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:41:30 crc kubenswrapper[4812]: I1125 17:41:30.195089 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:41:30 crc kubenswrapper[4812]: I1125 17:41:30.213043 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:41:30 crc kubenswrapper[4812]: I1125 17:41:30.213129 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:41:30 crc kubenswrapper[4812]: I1125 17:41:30.214436 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:41:30 crc kubenswrapper[4812]: I1125 17:41:30.214489 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" gracePeriod=30 Nov 25 17:41:30 crc kubenswrapper[4812]: I1125 17:41:30.229963 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:41:33 crc kubenswrapper[4812]: E1125 17:41:33.463002 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s 
restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:41:34 crc kubenswrapper[4812]: I1125 17:41:34.431086 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" exitCode=0 Nov 25 17:41:34 crc kubenswrapper[4812]: I1125 17:41:34.431185 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1"} Nov 25 17:41:34 crc kubenswrapper[4812]: I1125 17:41:34.431512 4812 scope.go:117] "RemoveContainer" containerID="dd24f8e4f8dcd74528afeea4ab3b8f53a73b5675b39c468125f25e43c8ab09b3" Nov 25 17:41:34 crc kubenswrapper[4812]: I1125 17:41:34.432862 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:41:34 crc kubenswrapper[4812]: E1125 17:41:34.433291 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:41:35 crc kubenswrapper[4812]: I1125 17:41:35.839689 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:41:35 crc kubenswrapper[4812]: I1125 17:41:35.840106 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:41:35 crc kubenswrapper[4812]: E1125 17:41:35.840314 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:41:35 crc kubenswrapper[4812]: E1125 17:41:35.840474 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:41:45 crc kubenswrapper[4812]: I1125 17:41:45.846781 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:41:45 crc kubenswrapper[4812]: E1125 17:41:45.849364 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:41:46 crc kubenswrapper[4812]: I1125 17:41:46.832743 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:41:46 crc kubenswrapper[4812]: E1125 
17:41:46.833513 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:41:50 crc kubenswrapper[4812]: I1125 17:41:50.832178 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:41:50 crc kubenswrapper[4812]: E1125 17:41:50.832932 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:41:59 crc kubenswrapper[4812]: I1125 17:41:59.831498 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:41:59 crc kubenswrapper[4812]: E1125 17:41:59.832409 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:42:01 crc kubenswrapper[4812]: I1125 17:42:01.831814 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:42:02 crc kubenswrapper[4812]: I1125 17:42:02.779792 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"0acc40c7b955f4eaa4833a720fa59f543dc276d015523fb120f2e257ee7a126e"} Nov 25 17:42:04 crc kubenswrapper[4812]: I1125 17:42:04.832719 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:42:04 crc kubenswrapper[4812]: E1125 17:42:04.833948 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:42:11 crc kubenswrapper[4812]: I1125 17:42:11.831966 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:42:11 crc kubenswrapper[4812]: E1125 17:42:11.833669 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:42:16 crc kubenswrapper[4812]: I1125 17:42:16.833363 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:42:16 crc kubenswrapper[4812]: E1125 17:42:16.835094 4812 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:42:26 crc kubenswrapper[4812]: I1125 17:42:26.835936 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:42:26 crc kubenswrapper[4812]: E1125 17:42:26.837234 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:42:31 crc kubenswrapper[4812]: I1125 17:42:31.832881 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:42:31 crc kubenswrapper[4812]: E1125 17:42:31.833498 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:42:38 crc kubenswrapper[4812]: I1125 17:42:38.832082 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:42:38 crc kubenswrapper[4812]: E1125 17:42:38.833451 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:42:42 crc kubenswrapper[4812]: I1125 17:42:42.833049 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:42:42 crc kubenswrapper[4812]: E1125 17:42:42.834315 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:42:51 crc kubenswrapper[4812]: I1125 17:42:51.831732 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:42:51 crc kubenswrapper[4812]: E1125 17:42:51.832552 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:42:57 crc kubenswrapper[4812]: I1125 17:42:57.835995 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:42:57 crc kubenswrapper[4812]: E1125 17:42:57.837158 4812 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:43:03 crc kubenswrapper[4812]: I1125 17:43:03.832400 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:43:04 crc kubenswrapper[4812]: I1125 17:43:04.505699 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e"} Nov 25 17:43:04 crc kubenswrapper[4812]: I1125 17:43:04.506257 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:43:10 crc kubenswrapper[4812]: I1125 17:43:10.834851 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:43:10 crc kubenswrapper[4812]: E1125 17:43:10.835839 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:43:21 crc kubenswrapper[4812]: I1125 17:43:21.160869 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:43:21 crc kubenswrapper[4812]: I1125 17:43:21.163621 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:43:23 crc kubenswrapper[4812]: I1125 17:43:23.831550 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:43:24 crc kubenswrapper[4812]: I1125 17:43:24.771041 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"} Nov 25 17:43:25 crc kubenswrapper[4812]: I1125 17:43:25.163691 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:43:25 crc kubenswrapper[4812]: I1125 17:43:25.786505 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" exitCode=1 Nov 25 17:43:25 crc kubenswrapper[4812]: I1125 17:43:25.786559 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"} Nov 25 17:43:25 crc kubenswrapper[4812]: I1125 17:43:25.787291 4812 scope.go:117] "RemoveContainer" containerID="458f7c14029f1e754183e3cc2d614f0a35ee989439f7524c07671bf2e9fe647a" Nov 25 17:43:25 crc 
kubenswrapper[4812]: I1125 17:43:25.787476 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:43:25 crc kubenswrapper[4812]: E1125 17:43:25.787959 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:43:26 crc kubenswrapper[4812]: I1125 17:43:26.803512 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:43:26 crc kubenswrapper[4812]: E1125 17:43:26.804279 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:43:31 crc kubenswrapper[4812]: I1125 17:43:31.106858 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:43:31 crc kubenswrapper[4812]: I1125 17:43:31.162428 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:43:35 crc kubenswrapper[4812]: I1125 17:43:35.164006 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:43:35 crc kubenswrapper[4812]: I1125 17:43:35.165997 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:43:35 crc kubenswrapper[4812]: E1125 17:43:35.166495 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:43:40 crc kubenswrapper[4812]: I1125 17:43:40.197610 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:43:40 crc kubenswrapper[4812]: I1125 17:43:40.198295 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:43:40 crc kubenswrapper[4812]: I1125 17:43:40.199758 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:43:40 crc kubenswrapper[4812]: I1125 17:43:40.199826 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" 
podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" gracePeriod=30 Nov 25 17:43:40 crc kubenswrapper[4812]: I1125 17:43:40.200615 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:43:40 crc kubenswrapper[4812]: I1125 17:43:40.209571 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:43:43 crc kubenswrapper[4812]: E1125 17:43:43.433155 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:43:44 crc kubenswrapper[4812]: I1125 17:43:44.059016 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" exitCode=0 Nov 25 17:43:44 crc kubenswrapper[4812]: I1125 17:43:44.059085 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e"} Nov 25 17:43:44 crc kubenswrapper[4812]: I1125 17:43:44.059127 4812 scope.go:117] "RemoveContainer" containerID="8634b319ac05f5f0135c5b54e5f80ca9464531cc260fdcbc06631ba8597cf0c1" Nov 25 17:43:44 crc kubenswrapper[4812]: I1125 17:43:44.060074 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:43:44 crc kubenswrapper[4812]: E1125 17:43:44.060720 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:43:45 crc kubenswrapper[4812]: I1125 17:43:45.164442 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:43:45 crc kubenswrapper[4812]: I1125 17:43:45.166061 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:43:45 crc kubenswrapper[4812]: E1125 17:43:45.166598 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.848392 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nhvrl"] Nov 25 17:43:47 crc kubenswrapper[4812]: E1125 17:43:47.849723 4812 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon-log" Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.849751 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon-log" Nov 25 17:43:47 crc kubenswrapper[4812]: E1125 17:43:47.849833 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.849845 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.850175 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon" Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.850249 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="842381e2-aa1c-4a72-9db3-51bffd277741" containerName="horizon-log" Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.869307 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nhvrl"] Nov 25 17:43:47 crc kubenswrapper[4812]: I1125 17:43:47.869503 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.025412 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-catalog-content\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.026234 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-utilities\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.026440 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsfmg\" (UniqueName: \"kubernetes.io/projected/c62c6374-9c53-4d85-a54b-5aba76cf722c-kube-api-access-fsfmg\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.129466 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-catalog-content\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.129846 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-utilities\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.129996 4812 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-fsfmg\" (UniqueName: \"kubernetes.io/projected/c62c6374-9c53-4d85-a54b-5aba76cf722c-kube-api-access-fsfmg\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.130439 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-catalog-content\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.130480 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-utilities\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.156504 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsfmg\" (UniqueName: \"kubernetes.io/projected/c62c6374-9c53-4d85-a54b-5aba76cf722c-kube-api-access-fsfmg\") pod \"redhat-marketplace-nhvrl\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.202499 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:48 crc kubenswrapper[4812]: I1125 17:43:48.691823 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nhvrl"] Nov 25 17:43:48 crc kubenswrapper[4812]: W1125 17:43:48.704692 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc62c6374_9c53_4d85_a54b_5aba76cf722c.slice/crio-5a0abaea3ed55b384708ae567be8a544016191c6f74ce0e73b54b2796d74911c WatchSource:0}: Error finding container 5a0abaea3ed55b384708ae567be8a544016191c6f74ce0e73b54b2796d74911c: Status 404 returned error can't find the container with id 5a0abaea3ed55b384708ae567be8a544016191c6f74ce0e73b54b2796d74911c Nov 25 17:43:49 crc kubenswrapper[4812]: I1125 17:43:49.126148 4812 generic.go:334] "Generic (PLEG): container finished" podID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerID="960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef" exitCode=0 Nov 25 17:43:49 crc kubenswrapper[4812]: I1125 17:43:49.126190 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerDied","Data":"960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef"} Nov 25 17:43:49 crc kubenswrapper[4812]: I1125 17:43:49.126214 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerStarted","Data":"5a0abaea3ed55b384708ae567be8a544016191c6f74ce0e73b54b2796d74911c"} Nov 25 17:43:49 crc kubenswrapper[4812]: I1125 17:43:49.128954 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:43:50 crc kubenswrapper[4812]: I1125 17:43:50.143693 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerStarted","Data":"b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4"} Nov 25 17:43:51 crc kubenswrapper[4812]: I1125 17:43:51.158952 4812 generic.go:334] "Generic (PLEG): container finished" podID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerID="b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4" exitCode=0 Nov 25 17:43:51 crc kubenswrapper[4812]: I1125 17:43:51.159078 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerDied","Data":"b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4"} Nov 25 17:43:52 crc kubenswrapper[4812]: I1125 17:43:52.173454 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerStarted","Data":"12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28"} Nov 25 17:43:52 crc kubenswrapper[4812]: I1125 17:43:52.203364 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nhvrl" podStartSLOduration=2.718315868 podStartE2EDuration="5.203327412s" podCreationTimestamp="2025-11-25 17:43:47 +0000 UTC" firstStartedPulling="2025-11-25 17:43:49.128710909 +0000 UTC m=+3403.968853004" lastFinishedPulling="2025-11-25 17:43:51.613722413 +0000 UTC m=+3406.453864548" observedRunningTime="2025-11-25 17:43:52.2010446 +0000 UTC m=+3407.041186735" watchObservedRunningTime="2025-11-25 17:43:52.203327412 +0000 UTC m=+3407.043469547" Nov 25 17:43:58 crc kubenswrapper[4812]: I1125 17:43:58.202792 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:58 crc kubenswrapper[4812]: I1125 17:43:58.203675 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:58 crc kubenswrapper[4812]: I1125 17:43:58.272358 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:58 crc kubenswrapper[4812]: I1125 17:43:58.353804 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:43:58 crc kubenswrapper[4812]: I1125 17:43:58.529344 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nhvrl"] Nov 25 17:43:58 crc kubenswrapper[4812]: I1125 17:43:58.831958 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:43:58 crc kubenswrapper[4812]: E1125 17:43:58.832356 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:44:00 crc kubenswrapper[4812]: I1125 17:44:00.297605 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nhvrl" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="registry-server" 
containerID="cri-o://12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28" gracePeriod=2 Nov 25 17:44:00 crc kubenswrapper[4812]: I1125 17:44:00.832518 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:44:00 crc kubenswrapper[4812]: E1125 17:44:00.833335 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:44:00 crc kubenswrapper[4812]: I1125 17:44:00.910192 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.018465 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-utilities\") pod \"c62c6374-9c53-4d85-a54b-5aba76cf722c\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.018669 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-catalog-content\") pod \"c62c6374-9c53-4d85-a54b-5aba76cf722c\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.018933 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsfmg\" (UniqueName: \"kubernetes.io/projected/c62c6374-9c53-4d85-a54b-5aba76cf722c-kube-api-access-fsfmg\") pod \"c62c6374-9c53-4d85-a54b-5aba76cf722c\" (UID: \"c62c6374-9c53-4d85-a54b-5aba76cf722c\") " Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.019576 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-utilities" (OuterVolumeSpecName: "utilities") pod "c62c6374-9c53-4d85-a54b-5aba76cf722c" (UID: "c62c6374-9c53-4d85-a54b-5aba76cf722c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.027767 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c62c6374-9c53-4d85-a54b-5aba76cf722c-kube-api-access-fsfmg" (OuterVolumeSpecName: "kube-api-access-fsfmg") pod "c62c6374-9c53-4d85-a54b-5aba76cf722c" (UID: "c62c6374-9c53-4d85-a54b-5aba76cf722c"). InnerVolumeSpecName "kube-api-access-fsfmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.049627 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c62c6374-9c53-4d85-a54b-5aba76cf722c" (UID: "c62c6374-9c53-4d85-a54b-5aba76cf722c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.121850 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsfmg\" (UniqueName: \"kubernetes.io/projected/c62c6374-9c53-4d85-a54b-5aba76cf722c-kube-api-access-fsfmg\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.121888 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.121902 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c62c6374-9c53-4d85-a54b-5aba76cf722c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.308682 4812 generic.go:334] "Generic (PLEG): container finished" podID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerID="12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28" exitCode=0 Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.308729 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nhvrl" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.308749 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerDied","Data":"12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28"} Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.309203 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nhvrl" event={"ID":"c62c6374-9c53-4d85-a54b-5aba76cf722c","Type":"ContainerDied","Data":"5a0abaea3ed55b384708ae567be8a544016191c6f74ce0e73b54b2796d74911c"} Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.309229 4812 scope.go:117] "RemoveContainer" containerID="12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.333712 4812 scope.go:117] "RemoveContainer" containerID="b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.356333 4812 scope.go:117] "RemoveContainer" containerID="960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.364490 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nhvrl"] Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.374604 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nhvrl"] Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.401470 4812 scope.go:117] "RemoveContainer" containerID="12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28" Nov 25 17:44:01 crc kubenswrapper[4812]: E1125 17:44:01.401939 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28\": container with ID starting with 12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28 not found: ID does not exist" containerID="12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.401966 4812 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28"} err="failed to get container status \"12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28\": rpc error: code = NotFound desc = could not find container \"12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28\": container with ID starting with 12f2c56789709d127a1cd8cd61fd7b7e15cad941e714498481b706b3d2626b28 not found: ID does not exist" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.401987 4812 scope.go:117] "RemoveContainer" containerID="b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4" Nov 25 17:44:01 crc kubenswrapper[4812]: E1125 17:44:01.402303 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4\": container with ID starting with b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4 not found: ID does not exist" containerID="b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.402328 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4"} err="failed to get container status \"b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4\": rpc error: code = NotFound desc = could not find container \"b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4\": container with ID starting with b1db239ebd0c7caa7a6c250c9cc5c59f6a771505e836549b973beeb3d7e7f4c4 not found: ID does not exist" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.402343 4812 scope.go:117] "RemoveContainer" containerID="960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef" Nov 25 17:44:01 crc kubenswrapper[4812]: E1125 17:44:01.402601 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef\": container with ID starting with 960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef not found: ID does not exist" containerID="960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.402624 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef"} err="failed to get container status \"960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef\": rpc error: code = NotFound desc = could not find container \"960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef\": container with ID starting with 960707cd8965e3db4efe817fde34192e2f3abe5f0af17da7befc65f671cfc8ef not found: ID does not exist" Nov 25 17:44:01 crc kubenswrapper[4812]: E1125 17:44:01.455828 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc62c6374_9c53_4d85_a54b_5aba76cf722c.slice\": RecentStats: unable to find data in memory cache]" Nov 25 17:44:01 crc kubenswrapper[4812]: I1125 17:44:01.849764 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" 
path="/var/lib/kubelet/pods/c62c6374-9c53-4d85-a54b-5aba76cf722c/volumes" Nov 25 17:44:11 crc kubenswrapper[4812]: I1125 17:44:11.832271 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:44:11 crc kubenswrapper[4812]: I1125 17:44:11.832925 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:44:11 crc kubenswrapper[4812]: E1125 17:44:11.833037 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:44:11 crc kubenswrapper[4812]: E1125 17:44:11.833207 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.232410 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tjqkm"] Nov 25 17:44:13 crc kubenswrapper[4812]: E1125 17:44:13.233973 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="registry-server" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.234067 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="registry-server" Nov 25 17:44:13 crc kubenswrapper[4812]: E1125 17:44:13.234145 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="extract-utilities" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.234210 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="extract-utilities" Nov 25 17:44:13 crc kubenswrapper[4812]: E1125 17:44:13.234311 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="extract-content" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.234377 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="extract-content" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.234732 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="c62c6374-9c53-4d85-a54b-5aba76cf722c" containerName="registry-server" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.236501 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.245818 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjqkm"] Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.303723 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-catalog-content\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.303789 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm46q\" (UniqueName: \"kubernetes.io/projected/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-kube-api-access-pm46q\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.304109 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-utilities\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.405995 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-catalog-content\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.406083 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm46q\" (UniqueName: \"kubernetes.io/projected/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-kube-api-access-pm46q\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.406189 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-utilities\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.406611 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-catalog-content\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.406747 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-utilities\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.434866 4812 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pm46q\" (UniqueName: \"kubernetes.io/projected/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-kube-api-access-pm46q\") pod \"certified-operators-tjqkm\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:13 crc kubenswrapper[4812]: I1125 17:44:13.564004 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:14 crc kubenswrapper[4812]: I1125 17:44:14.028952 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjqkm"] Nov 25 17:44:14 crc kubenswrapper[4812]: I1125 17:44:14.449839 4812 generic.go:334] "Generic (PLEG): container finished" podID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerID="f9f5d488d807b0c072b7ad5ad4b3a13e905f4872faf5ccdb58ae0dcfb44a8a9e" exitCode=0 Nov 25 17:44:14 crc kubenswrapper[4812]: I1125 17:44:14.449913 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjqkm" event={"ID":"9dd7f12a-6ccb-4287-b79a-bc915fb503f6","Type":"ContainerDied","Data":"f9f5d488d807b0c072b7ad5ad4b3a13e905f4872faf5ccdb58ae0dcfb44a8a9e"} Nov 25 17:44:14 crc kubenswrapper[4812]: I1125 17:44:14.450509 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjqkm" event={"ID":"9dd7f12a-6ccb-4287-b79a-bc915fb503f6","Type":"ContainerStarted","Data":"a63db0e748e17bf0985b714f713330cbb0c1ab532964386eb61650974cdaa0da"} Nov 25 17:44:16 crc kubenswrapper[4812]: I1125 17:44:16.476269 4812 generic.go:334] "Generic (PLEG): container finished" podID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerID="0186d788a0d116b360c8eb8c6d4b40c42e0cdbe4cd4b8f485c0c96bddb322cf1" exitCode=0 Nov 25 17:44:16 crc kubenswrapper[4812]: I1125 17:44:16.476332 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjqkm" event={"ID":"9dd7f12a-6ccb-4287-b79a-bc915fb503f6","Type":"ContainerDied","Data":"0186d788a0d116b360c8eb8c6d4b40c42e0cdbe4cd4b8f485c0c96bddb322cf1"} Nov 25 17:44:17 crc kubenswrapper[4812]: I1125 17:44:17.493652 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjqkm" event={"ID":"9dd7f12a-6ccb-4287-b79a-bc915fb503f6","Type":"ContainerStarted","Data":"504bb5050064d0f702fc79bfe545ae1df13597562a41e6a023cd54c474932f4a"} Nov 25 17:44:18 crc kubenswrapper[4812]: I1125 17:44:18.533174 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tjqkm" podStartSLOduration=3.020995606 podStartE2EDuration="5.533154527s" podCreationTimestamp="2025-11-25 17:44:13 +0000 UTC" firstStartedPulling="2025-11-25 17:44:14.451897073 +0000 UTC m=+3429.292039178" lastFinishedPulling="2025-11-25 17:44:16.964055984 +0000 UTC m=+3431.804198099" observedRunningTime="2025-11-25 17:44:18.524558805 +0000 UTC m=+3433.364700920" watchObservedRunningTime="2025-11-25 17:44:18.533154527 +0000 UTC m=+3433.373296622" Nov 25 17:44:23 crc kubenswrapper[4812]: I1125 17:44:23.565289 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:23 crc kubenswrapper[4812]: I1125 17:44:23.566017 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:23 crc kubenswrapper[4812]: I1125 17:44:23.650673 4812 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:24 crc kubenswrapper[4812]: I1125 17:44:24.649683 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:24 crc kubenswrapper[4812]: I1125 17:44:24.746223 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjqkm"] Nov 25 17:44:24 crc kubenswrapper[4812]: I1125 17:44:24.832314 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:44:24 crc kubenswrapper[4812]: E1125 17:44:24.832756 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:44:25 crc kubenswrapper[4812]: I1125 17:44:25.844834 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:44:25 crc kubenswrapper[4812]: E1125 17:44:25.845843 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:44:26 crc kubenswrapper[4812]: I1125 17:44:26.607356 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tjqkm" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="registry-server" containerID="cri-o://504bb5050064d0f702fc79bfe545ae1df13597562a41e6a023cd54c474932f4a" gracePeriod=2 Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.333266 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.333353 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.622100 4812 generic.go:334] "Generic (PLEG): container finished" podID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerID="504bb5050064d0f702fc79bfe545ae1df13597562a41e6a023cd54c474932f4a" exitCode=0 Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.622152 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjqkm" event={"ID":"9dd7f12a-6ccb-4287-b79a-bc915fb503f6","Type":"ContainerDied","Data":"504bb5050064d0f702fc79bfe545ae1df13597562a41e6a023cd54c474932f4a"} Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.622212 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjqkm" 
event={"ID":"9dd7f12a-6ccb-4287-b79a-bc915fb503f6","Type":"ContainerDied","Data":"a63db0e748e17bf0985b714f713330cbb0c1ab532964386eb61650974cdaa0da"} Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.622230 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a63db0e748e17bf0985b714f713330cbb0c1ab532964386eb61650974cdaa0da" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.726107 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.815339 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-utilities\") pod \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.815482 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-catalog-content\") pod \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.815636 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm46q\" (UniqueName: \"kubernetes.io/projected/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-kube-api-access-pm46q\") pod \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\" (UID: \"9dd7f12a-6ccb-4287-b79a-bc915fb503f6\") " Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.817280 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-utilities" (OuterVolumeSpecName: "utilities") pod "9dd7f12a-6ccb-4287-b79a-bc915fb503f6" (UID: "9dd7f12a-6ccb-4287-b79a-bc915fb503f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.824485 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-kube-api-access-pm46q" (OuterVolumeSpecName: "kube-api-access-pm46q") pod "9dd7f12a-6ccb-4287-b79a-bc915fb503f6" (UID: "9dd7f12a-6ccb-4287-b79a-bc915fb503f6"). InnerVolumeSpecName "kube-api-access-pm46q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.866840 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9dd7f12a-6ccb-4287-b79a-bc915fb503f6" (UID: "9dd7f12a-6ccb-4287-b79a-bc915fb503f6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.918883 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.919194 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:27 crc kubenswrapper[4812]: I1125 17:44:27.919243 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm46q\" (UniqueName: \"kubernetes.io/projected/9dd7f12a-6ccb-4287-b79a-bc915fb503f6-kube-api-access-pm46q\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:28 crc kubenswrapper[4812]: I1125 17:44:28.636663 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjqkm" Nov 25 17:44:28 crc kubenswrapper[4812]: I1125 17:44:28.688075 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjqkm"] Nov 25 17:44:28 crc kubenswrapper[4812]: I1125 17:44:28.696691 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tjqkm"] Nov 25 17:44:29 crc kubenswrapper[4812]: I1125 17:44:29.852887 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" path="/var/lib/kubelet/pods/9dd7f12a-6ccb-4287-b79a-bc915fb503f6/volumes" Nov 25 17:44:30 crc kubenswrapper[4812]: I1125 17:44:30.998427 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xr8tf"] Nov 25 17:44:30 crc kubenswrapper[4812]: E1125 17:44:30.998809 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="registry-server" Nov 25 17:44:30 crc kubenswrapper[4812]: I1125 17:44:30.998821 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="registry-server" Nov 25 17:44:30 crc kubenswrapper[4812]: E1125 17:44:30.998846 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="extract-utilities" Nov 25 17:44:30 crc kubenswrapper[4812]: I1125 17:44:30.998853 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="extract-utilities" Nov 25 17:44:30 crc kubenswrapper[4812]: E1125 17:44:30.998877 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="extract-content" Nov 25 17:44:30 crc kubenswrapper[4812]: I1125 17:44:30.998882 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="extract-content" Nov 25 17:44:30 crc kubenswrapper[4812]: I1125 17:44:30.999067 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dd7f12a-6ccb-4287-b79a-bc915fb503f6" containerName="registry-server" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.000334 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.015186 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xr8tf"] Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.088639 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5w4b6\" (UniqueName: \"kubernetes.io/projected/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-kube-api-access-5w4b6\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.089080 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-utilities\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.089279 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-catalog-content\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.191313 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-utilities\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.191430 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-catalog-content\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.191476 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5w4b6\" (UniqueName: \"kubernetes.io/projected/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-kube-api-access-5w4b6\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.192323 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-utilities\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.192383 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-catalog-content\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.222491 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5w4b6\" (UniqueName: \"kubernetes.io/projected/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-kube-api-access-5w4b6\") pod \"redhat-operators-xr8tf\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.336113 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:31 crc kubenswrapper[4812]: I1125 17:44:31.806467 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xr8tf"] Nov 25 17:44:32 crc kubenswrapper[4812]: E1125 17:44:32.211603 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda96a4c2b_dd20_4af8_b5c0_2344e1cb6ec7.slice/crio-conmon-2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda96a4c2b_dd20_4af8_b5c0_2344e1cb6ec7.slice/crio-2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464.scope\": RecentStats: unable to find data in memory cache]" Nov 25 17:44:32 crc kubenswrapper[4812]: I1125 17:44:32.680578 4812 generic.go:334] "Generic (PLEG): container finished" podID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerID="2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464" exitCode=0 Nov 25 17:44:32 crc kubenswrapper[4812]: I1125 17:44:32.680653 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerDied","Data":"2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464"} Nov 25 17:44:32 crc kubenswrapper[4812]: I1125 17:44:32.680923 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerStarted","Data":"4846b322e82e47a0cfe8a4033752f9c80bcc4363546413e1fe739272a52de1f1"} Nov 25 17:44:33 crc kubenswrapper[4812]: I1125 17:44:33.690600 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerStarted","Data":"4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd"} Nov 25 17:44:34 crc kubenswrapper[4812]: I1125 17:44:34.702308 4812 generic.go:334] "Generic (PLEG): container finished" podID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerID="4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd" exitCode=0 Nov 25 17:44:34 crc kubenswrapper[4812]: I1125 17:44:34.702365 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerDied","Data":"4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd"} Nov 25 17:44:35 crc kubenswrapper[4812]: I1125 17:44:35.714738 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerStarted","Data":"8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380"} Nov 25 17:44:35 crc kubenswrapper[4812]: I1125 17:44:35.738829 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-xr8tf" podStartSLOduration=3.153247258 podStartE2EDuration="5.738812036s" podCreationTimestamp="2025-11-25 17:44:30 +0000 UTC" firstStartedPulling="2025-11-25 17:44:32.683647971 +0000 UTC m=+3447.523790106" lastFinishedPulling="2025-11-25 17:44:35.269212749 +0000 UTC m=+3450.109354884" observedRunningTime="2025-11-25 17:44:35.731113417 +0000 UTC m=+3450.571255522" watchObservedRunningTime="2025-11-25 17:44:35.738812036 +0000 UTC m=+3450.578954131" Nov 25 17:44:35 crc kubenswrapper[4812]: I1125 17:44:35.842434 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:44:35 crc kubenswrapper[4812]: E1125 17:44:35.842698 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:44:38 crc kubenswrapper[4812]: I1125 17:44:38.832400 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:44:38 crc kubenswrapper[4812]: E1125 17:44:38.833416 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:44:41 crc kubenswrapper[4812]: I1125 17:44:41.337282 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:41 crc kubenswrapper[4812]: I1125 17:44:41.337714 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:41 crc kubenswrapper[4812]: I1125 17:44:41.397241 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:41 crc kubenswrapper[4812]: I1125 17:44:41.850024 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:41 crc kubenswrapper[4812]: I1125 17:44:41.919375 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xr8tf"] Nov 25 17:44:43 crc kubenswrapper[4812]: I1125 17:44:43.802377 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xr8tf" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="registry-server" containerID="cri-o://8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380" gracePeriod=2 Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.378748 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.487808 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5w4b6\" (UniqueName: \"kubernetes.io/projected/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-kube-api-access-5w4b6\") pod \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.487919 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-utilities\") pod \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.488006 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-catalog-content\") pod \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\" (UID: \"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7\") " Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.488929 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-utilities" (OuterVolumeSpecName: "utilities") pod "a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" (UID: "a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.493449 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-kube-api-access-5w4b6" (OuterVolumeSpecName: "kube-api-access-5w4b6") pod "a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" (UID: "a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7"). InnerVolumeSpecName "kube-api-access-5w4b6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.590582 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5w4b6\" (UniqueName: \"kubernetes.io/projected/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-kube-api-access-5w4b6\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.590641 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.815868 4812 generic.go:334] "Generic (PLEG): container finished" podID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerID="8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380" exitCode=0 Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.815971 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerDied","Data":"8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380"} Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.816006 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xr8tf" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.816043 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xr8tf" event={"ID":"a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7","Type":"ContainerDied","Data":"4846b322e82e47a0cfe8a4033752f9c80bcc4363546413e1fe739272a52de1f1"} Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.816087 4812 scope.go:117] "RemoveContainer" containerID="8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.846370 4812 scope.go:117] "RemoveContainer" containerID="4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.874183 4812 scope.go:117] "RemoveContainer" containerID="2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.959175 4812 scope.go:117] "RemoveContainer" containerID="8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380" Nov 25 17:44:44 crc kubenswrapper[4812]: E1125 17:44:44.959811 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380\": container with ID starting with 8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380 not found: ID does not exist" containerID="8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.959917 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380"} err="failed to get container status \"8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380\": rpc error: code = NotFound desc = could not find container \"8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380\": container with ID starting with 8d29edae64ca5b029a00da2c13d60be51b9643d3d8266298f32104996e075380 not found: ID does not exist" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.959948 4812 scope.go:117] "RemoveContainer" containerID="4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd" Nov 25 17:44:44 crc kubenswrapper[4812]: E1125 17:44:44.960550 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd\": container with ID starting with 4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd not found: ID does not exist" containerID="4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.960591 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd"} err="failed to get container status \"4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd\": rpc error: code = NotFound desc = could not find container \"4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd\": container with ID starting with 4e06b80999e3c0a777112e3da82165b37e9018eb963916e8baf57ff96a903cfd not found: ID does not exist" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.960620 4812 scope.go:117] "RemoveContainer" 
containerID="2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464" Nov 25 17:44:44 crc kubenswrapper[4812]: E1125 17:44:44.960904 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464\": container with ID starting with 2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464 not found: ID does not exist" containerID="2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464" Nov 25 17:44:44 crc kubenswrapper[4812]: I1125 17:44:44.960930 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464"} err="failed to get container status \"2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464\": rpc error: code = NotFound desc = could not find container \"2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464\": container with ID starting with 2a4b2e21663f3ec0978b728d8f9a6297ae725056fe9ab8e4b278f80036464464 not found: ID does not exist" Nov 25 17:44:47 crc kubenswrapper[4812]: I1125 17:44:47.047012 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" (UID: "a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 17:44:47 crc kubenswrapper[4812]: I1125 17:44:47.148749 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 17:44:47 crc kubenswrapper[4812]: I1125 17:44:47.260871 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xr8tf"] Nov 25 17:44:47 crc kubenswrapper[4812]: I1125 17:44:47.270020 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xr8tf"] Nov 25 17:44:47 crc kubenswrapper[4812]: I1125 17:44:47.832791 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:44:47 crc kubenswrapper[4812]: E1125 17:44:47.833585 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:44:47 crc kubenswrapper[4812]: I1125 17:44:47.859173 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" path="/var/lib/kubelet/pods/a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7/volumes" Nov 25 17:44:51 crc kubenswrapper[4812]: I1125 17:44:51.831720 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:44:51 crc kubenswrapper[4812]: E1125 17:44:51.832672 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" 
pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:44:57 crc kubenswrapper[4812]: I1125 17:44:57.333457 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:44:57 crc kubenswrapper[4812]: I1125 17:44:57.333953 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:44:58 crc kubenswrapper[4812]: I1125 17:44:58.833213 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:44:58 crc kubenswrapper[4812]: E1125 17:44:58.833740 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.154203 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm"] Nov 25 17:45:00 crc kubenswrapper[4812]: E1125 17:45:00.155231 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.155339 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4812]: E1125 17:45:00.155365 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="extract-utilities" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.155379 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="extract-utilities" Nov 25 17:45:00 crc kubenswrapper[4812]: E1125 17:45:00.155416 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="extract-content" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.155429 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="extract-content" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.155780 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="a96a4c2b-dd20-4af8-b5c0-2344e1cb6ec7" containerName="registry-server" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.156781 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.164221 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.168817 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.178632 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/093154ab-13aa-473e-9edb-463e69da7d50-secret-volume\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.178792 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/093154ab-13aa-473e-9edb-463e69da7d50-config-volume\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.178864 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmtdb\" (UniqueName: \"kubernetes.io/projected/093154ab-13aa-473e-9edb-463e69da7d50-kube-api-access-xmtdb\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.200351 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm"] Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.280484 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/093154ab-13aa-473e-9edb-463e69da7d50-secret-volume\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.280570 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/093154ab-13aa-473e-9edb-463e69da7d50-config-volume\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.280628 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmtdb\" (UniqueName: \"kubernetes.io/projected/093154ab-13aa-473e-9edb-463e69da7d50-kube-api-access-xmtdb\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.281447 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/093154ab-13aa-473e-9edb-463e69da7d50-config-volume\") pod 
\"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.286644 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/093154ab-13aa-473e-9edb-463e69da7d50-secret-volume\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.296176 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmtdb\" (UniqueName: \"kubernetes.io/projected/093154ab-13aa-473e-9edb-463e69da7d50-kube-api-access-xmtdb\") pod \"collect-profiles-29401545-mcjmm\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.495336 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:00 crc kubenswrapper[4812]: I1125 17:45:00.977854 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm"] Nov 25 17:45:00 crc kubenswrapper[4812]: W1125 17:45:00.985594 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod093154ab_13aa_473e_9edb_463e69da7d50.slice/crio-f75ea282ef9794d30f58ec4905418fc8bf54341ea03e31bbf83a33cee4c05270 WatchSource:0}: Error finding container f75ea282ef9794d30f58ec4905418fc8bf54341ea03e31bbf83a33cee4c05270: Status 404 returned error can't find the container with id f75ea282ef9794d30f58ec4905418fc8bf54341ea03e31bbf83a33cee4c05270 Nov 25 17:45:01 crc kubenswrapper[4812]: I1125 17:45:01.043842 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" event={"ID":"093154ab-13aa-473e-9edb-463e69da7d50","Type":"ContainerStarted","Data":"f75ea282ef9794d30f58ec4905418fc8bf54341ea03e31bbf83a33cee4c05270"} Nov 25 17:45:02 crc kubenswrapper[4812]: I1125 17:45:02.052677 4812 generic.go:334] "Generic (PLEG): container finished" podID="093154ab-13aa-473e-9edb-463e69da7d50" containerID="f66ec2b9d9c6bd1efae4a88a72d407b35de5ff0d07f1a9e940d3b9b557948777" exitCode=0 Nov 25 17:45:02 crc kubenswrapper[4812]: I1125 17:45:02.052903 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" event={"ID":"093154ab-13aa-473e-9edb-463e69da7d50","Type":"ContainerDied","Data":"f66ec2b9d9c6bd1efae4a88a72d407b35de5ff0d07f1a9e940d3b9b557948777"} Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.426845 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.437209 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmtdb\" (UniqueName: \"kubernetes.io/projected/093154ab-13aa-473e-9edb-463e69da7d50-kube-api-access-xmtdb\") pod \"093154ab-13aa-473e-9edb-463e69da7d50\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.437264 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/093154ab-13aa-473e-9edb-463e69da7d50-secret-volume\") pod \"093154ab-13aa-473e-9edb-463e69da7d50\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.437340 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/093154ab-13aa-473e-9edb-463e69da7d50-config-volume\") pod \"093154ab-13aa-473e-9edb-463e69da7d50\" (UID: \"093154ab-13aa-473e-9edb-463e69da7d50\") " Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.438915 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/093154ab-13aa-473e-9edb-463e69da7d50-config-volume" (OuterVolumeSpecName: "config-volume") pod "093154ab-13aa-473e-9edb-463e69da7d50" (UID: "093154ab-13aa-473e-9edb-463e69da7d50"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.446614 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/093154ab-13aa-473e-9edb-463e69da7d50-kube-api-access-xmtdb" (OuterVolumeSpecName: "kube-api-access-xmtdb") pod "093154ab-13aa-473e-9edb-463e69da7d50" (UID: "093154ab-13aa-473e-9edb-463e69da7d50"). InnerVolumeSpecName "kube-api-access-xmtdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.446848 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/093154ab-13aa-473e-9edb-463e69da7d50-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "093154ab-13aa-473e-9edb-463e69da7d50" (UID: "093154ab-13aa-473e-9edb-463e69da7d50"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.540156 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmtdb\" (UniqueName: \"kubernetes.io/projected/093154ab-13aa-473e-9edb-463e69da7d50-kube-api-access-xmtdb\") on node \"crc\" DevicePath \"\"" Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.540192 4812 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/093154ab-13aa-473e-9edb-463e69da7d50-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:45:03 crc kubenswrapper[4812]: I1125 17:45:03.540205 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/093154ab-13aa-473e-9edb-463e69da7d50-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 17:45:04 crc kubenswrapper[4812]: I1125 17:45:04.076966 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" event={"ID":"093154ab-13aa-473e-9edb-463e69da7d50","Type":"ContainerDied","Data":"f75ea282ef9794d30f58ec4905418fc8bf54341ea03e31bbf83a33cee4c05270"} Nov 25 17:45:04 crc kubenswrapper[4812]: I1125 17:45:04.077281 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f75ea282ef9794d30f58ec4905418fc8bf54341ea03e31bbf83a33cee4c05270" Nov 25 17:45:04 crc kubenswrapper[4812]: I1125 17:45:04.077207 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401545-mcjmm" Nov 25 17:45:04 crc kubenswrapper[4812]: I1125 17:45:04.500139 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89"] Nov 25 17:45:04 crc kubenswrapper[4812]: I1125 17:45:04.510763 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401500-4nh89"] Nov 25 17:45:04 crc kubenswrapper[4812]: I1125 17:45:04.831814 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:45:04 crc kubenswrapper[4812]: E1125 17:45:04.832082 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:45:05 crc kubenswrapper[4812]: I1125 17:45:05.848910 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eeffb98c-9b48-40c2-9c30-465876bb1a05" path="/var/lib/kubelet/pods/eeffb98c-9b48-40c2-9c30-465876bb1a05/volumes" Nov 25 17:45:13 crc kubenswrapper[4812]: I1125 17:45:13.832876 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:45:13 crc kubenswrapper[4812]: E1125 17:45:13.834568 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:45:16 crc kubenswrapper[4812]: I1125 17:45:16.831840 4812 scope.go:117] "RemoveContainer" 
containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:45:16 crc kubenswrapper[4812]: E1125 17:45:16.832953 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:45:27 crc kubenswrapper[4812]: I1125 17:45:27.333048 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:45:27 crc kubenswrapper[4812]: I1125 17:45:27.333727 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:45:27 crc kubenswrapper[4812]: I1125 17:45:27.333794 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:45:27 crc kubenswrapper[4812]: I1125 17:45:27.334754 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0acc40c7b955f4eaa4833a720fa59f543dc276d015523fb120f2e257ee7a126e"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:45:27 crc kubenswrapper[4812]: I1125 17:45:27.334834 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://0acc40c7b955f4eaa4833a720fa59f543dc276d015523fb120f2e257ee7a126e" gracePeriod=600 Nov 25 17:45:27 crc kubenswrapper[4812]: I1125 17:45:27.832201 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:45:27 crc kubenswrapper[4812]: E1125 17:45:27.833033 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:45:28 crc kubenswrapper[4812]: I1125 17:45:28.364332 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="0acc40c7b955f4eaa4833a720fa59f543dc276d015523fb120f2e257ee7a126e" exitCode=0 Nov 25 17:45:28 crc kubenswrapper[4812]: I1125 17:45:28.364382 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"0acc40c7b955f4eaa4833a720fa59f543dc276d015523fb120f2e257ee7a126e"} Nov 25 17:45:28 crc kubenswrapper[4812]: I1125 17:45:28.365028 4812 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"} Nov 25 17:45:28 crc kubenswrapper[4812]: I1125 17:45:28.365064 4812 scope.go:117] "RemoveContainer" containerID="2ae50c0855798672cbbf542aacde517f5fc4d85f0598c9ce69441f2a863ae0cd" Nov 25 17:45:30 crc kubenswrapper[4812]: I1125 17:45:30.831333 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:45:30 crc kubenswrapper[4812]: E1125 17:45:30.832158 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:45:42 crc kubenswrapper[4812]: I1125 17:45:42.832146 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:45:42 crc kubenswrapper[4812]: E1125 17:45:42.833992 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:45:42 crc kubenswrapper[4812]: I1125 17:45:42.834260 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:45:42 crc kubenswrapper[4812]: E1125 17:45:42.834667 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:45:55 crc kubenswrapper[4812]: I1125 17:45:55.876905 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717" Nov 25 17:45:55 crc kubenswrapper[4812]: E1125 17:45:55.880198 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:45:56 crc kubenswrapper[4812]: I1125 17:45:56.832389 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e" Nov 25 17:45:56 crc kubenswrapper[4812]: E1125 17:45:56.833272 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:46:02 crc kubenswrapper[4812]: I1125 17:46:02.199346 4812 scope.go:117] "RemoveContainer" containerID="88ba5a2fad5cc62974f0cacc31b9da5955276132b1de7bf8729ff065d5ae5df2" Nov 25 17:46:11 crc kubenswrapper[4812]: 
I1125 17:46:11.834004 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e"
Nov 25 17:46:11 crc kubenswrapper[4812]: I1125 17:46:11.835057 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:46:11 crc kubenswrapper[4812]: E1125 17:46:11.835439 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:46:11 crc kubenswrapper[4812]: E1125 17:46:11.835625 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:46:23 crc kubenswrapper[4812]: I1125 17:46:23.833312 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e"
Nov 25 17:46:24 crc kubenswrapper[4812]: I1125 17:46:24.831841 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:46:24 crc kubenswrapper[4812]: E1125 17:46:24.832380 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:46:25 crc kubenswrapper[4812]: I1125 17:46:25.069599 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"}
Nov 25 17:46:25 crc kubenswrapper[4812]: I1125 17:46:25.070785 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0"
Nov 25 17:46:36 crc kubenswrapper[4812]: I1125 17:46:36.831818 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:46:36 crc kubenswrapper[4812]: E1125 17:46:36.832695 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:46:41 crc kubenswrapper[4812]: I1125 17:46:41.079446 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-079a-account-create-xdmnx"]
Nov 25 17:46:41 crc kubenswrapper[4812]: I1125 17:46:41.095738 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-079a-account-create-xdmnx"]
Nov 25 17:46:41 crc kubenswrapper[4812]: I1125 17:46:41.225740 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:46:41 crc kubenswrapper[4812]: I1125 17:46:41.258699 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:46:41 crc kubenswrapper[4812]: I1125 17:46:41.848058 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d15ab92-82ab-47f1-b377-eec15c9c7b99" path="/var/lib/kubelet/pods/9d15ab92-82ab-47f1-b377-eec15c9c7b99/volumes"
Nov 25 17:46:42 crc kubenswrapper[4812]: I1125 17:46:42.041106 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-r4w2f"]
Nov 25 17:46:42 crc kubenswrapper[4812]: I1125 17:46:42.055622 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-r4w2f"]
Nov 25 17:46:43 crc kubenswrapper[4812]: I1125 17:46:43.853333 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e556e3f-d3af-47df-9561-e93c12e281d1" path="/var/lib/kubelet/pods/2e556e3f-d3af-47df-9561-e93c12e281d1/volumes"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.025303 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jhp5k"]
Nov 25 17:46:45 crc kubenswrapper[4812]: E1125 17:46:45.025754 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="093154ab-13aa-473e-9edb-463e69da7d50" containerName="collect-profiles"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.025770 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="093154ab-13aa-473e-9edb-463e69da7d50" containerName="collect-profiles"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.026016 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="093154ab-13aa-473e-9edb-463e69da7d50" containerName="collect-profiles"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.027704 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.067144 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jhp5k"]
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.169883 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sp82h\" (UniqueName: \"kubernetes.io/projected/1a515c99-5fb7-460a-bec8-caadf9a053f4-kube-api-access-sp82h\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.170039 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-catalog-content\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.170101 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-utilities\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.272130 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-catalog-content\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.272206 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-utilities\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.272307 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sp82h\" (UniqueName: \"kubernetes.io/projected/1a515c99-5fb7-460a-bec8-caadf9a053f4-kube-api-access-sp82h\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.272969 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-utilities\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.272990 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-catalog-content\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.302948 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sp82h\" (UniqueName: \"kubernetes.io/projected/1a515c99-5fb7-460a-bec8-caadf9a053f4-kube-api-access-sp82h\") pod \"community-operators-jhp5k\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") " pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.352484 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:45 crc kubenswrapper[4812]: I1125 17:46:45.939485 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jhp5k"]
Nov 25 17:46:46 crc kubenswrapper[4812]: I1125 17:46:46.328846 4812 generic.go:334] "Generic (PLEG): container finished" podID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerID="a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5" exitCode=0
Nov 25 17:46:46 crc kubenswrapper[4812]: I1125 17:46:46.329009 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerDied","Data":"a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5"}
Nov 25 17:46:46 crc kubenswrapper[4812]: I1125 17:46:46.329274 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerStarted","Data":"22061580527d95b93e6c91d473dc4d6325cb9c66643664a41389c42b392b84f7"}
Nov 25 17:46:47 crc kubenswrapper[4812]: I1125 17:46:47.338614 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerStarted","Data":"7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0"}
Nov 25 17:46:48 crc kubenswrapper[4812]: I1125 17:46:48.347758 4812 generic.go:334] "Generic (PLEG): container finished" podID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerID="7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0" exitCode=0
Nov 25 17:46:48 crc kubenswrapper[4812]: I1125 17:46:48.347809 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerDied","Data":"7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0"}
Nov 25 17:46:49 crc kubenswrapper[4812]: I1125 17:46:49.362243 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerStarted","Data":"9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa"}
Nov 25 17:46:50 crc kubenswrapper[4812]: I1125 17:46:50.831693 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:46:50 crc kubenswrapper[4812]: E1125 17:46:50.833632 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:46:51 crc kubenswrapper[4812]: I1125 17:46:51.152071 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:46:51 crc kubenswrapper[4812]: I1125 17:46:51.239026 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:46:55 crc kubenswrapper[4812]: I1125 17:46:55.353656 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:55 crc kubenswrapper[4812]: I1125 17:46:55.354226 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:55 crc kubenswrapper[4812]: I1125 17:46:55.456965 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:55 crc kubenswrapper[4812]: I1125 17:46:55.494192 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jhp5k" podStartSLOduration=9.103789079 podStartE2EDuration="11.494165741s" podCreationTimestamp="2025-11-25 17:46:44 +0000 UTC" firstStartedPulling="2025-11-25 17:46:46.334071968 +0000 UTC m=+3581.174214073" lastFinishedPulling="2025-11-25 17:46:48.72444864 +0000 UTC m=+3583.564590735" observedRunningTime="2025-11-25 17:46:49.397731505 +0000 UTC m=+3584.237873670" watchObservedRunningTime="2025-11-25 17:46:55.494165741 +0000 UTC m=+3590.334307846"
Nov 25 17:46:55 crc kubenswrapper[4812]: I1125 17:46:55.545002 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:55 crc kubenswrapper[4812]: I1125 17:46:55.711107 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jhp5k"]
Nov 25 17:46:57 crc kubenswrapper[4812]: I1125 17:46:57.452721 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jhp5k" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="registry-server" containerID="cri-o://9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa" gracePeriod=2
Nov 25 17:46:57 crc kubenswrapper[4812]: I1125 17:46:57.987226 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.089445 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-catalog-content\") pod \"1a515c99-5fb7-460a-bec8-caadf9a053f4\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") "
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.089637 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-utilities\") pod \"1a515c99-5fb7-460a-bec8-caadf9a053f4\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") "
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.089840 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sp82h\" (UniqueName: \"kubernetes.io/projected/1a515c99-5fb7-460a-bec8-caadf9a053f4-kube-api-access-sp82h\") pod \"1a515c99-5fb7-460a-bec8-caadf9a053f4\" (UID: \"1a515c99-5fb7-460a-bec8-caadf9a053f4\") "
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.091434 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-utilities" (OuterVolumeSpecName: "utilities") pod "1a515c99-5fb7-460a-bec8-caadf9a053f4" (UID: "1a515c99-5fb7-460a-bec8-caadf9a053f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.098828 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a515c99-5fb7-460a-bec8-caadf9a053f4-kube-api-access-sp82h" (OuterVolumeSpecName: "kube-api-access-sp82h") pod "1a515c99-5fb7-460a-bec8-caadf9a053f4" (UID: "1a515c99-5fb7-460a-bec8-caadf9a053f4"). InnerVolumeSpecName "kube-api-access-sp82h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.170890 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1a515c99-5fb7-460a-bec8-caadf9a053f4" (UID: "1a515c99-5fb7-460a-bec8-caadf9a053f4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.191920 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.191957 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sp82h\" (UniqueName: \"kubernetes.io/projected/1a515c99-5fb7-460a-bec8-caadf9a053f4-kube-api-access-sp82h\") on node \"crc\" DevicePath \"\""
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.191971 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a515c99-5fb7-460a-bec8-caadf9a053f4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.462654 4812 generic.go:334] "Generic (PLEG): container finished" podID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerID="9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa" exitCode=0
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.462759 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jhp5k"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.462754 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerDied","Data":"9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa"}
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.463705 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jhp5k" event={"ID":"1a515c99-5fb7-460a-bec8-caadf9a053f4","Type":"ContainerDied","Data":"22061580527d95b93e6c91d473dc4d6325cb9c66643664a41389c42b392b84f7"}
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.463752 4812 scope.go:117] "RemoveContainer" containerID="9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.504920 4812 scope.go:117] "RemoveContainer" containerID="7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.511468 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jhp5k"]
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.525168 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jhp5k"]
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.540924 4812 scope.go:117] "RemoveContainer" containerID="a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.590786 4812 scope.go:117] "RemoveContainer" containerID="9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa"
Nov 25 17:46:58 crc kubenswrapper[4812]: E1125 17:46:58.591375 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa\": container with ID starting with 9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa not found: ID does not exist" containerID="9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.591436 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa"} err="failed to get container status \"9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa\": rpc error: code = NotFound desc = could not find container \"9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa\": container with ID starting with 9d5b4977d753926624920df29a2cda7e2461d591d07942e72a257e7cf65a2eaa not found: ID does not exist"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.591462 4812 scope.go:117] "RemoveContainer" containerID="7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0"
Nov 25 17:46:58 crc kubenswrapper[4812]: E1125 17:46:58.592088 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0\": container with ID starting with 7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0 not found: ID does not exist" containerID="7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.592123 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0"} err="failed to get container status \"7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0\": rpc error: code = NotFound desc = could not find container \"7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0\": container with ID starting with 7ab77d47629c471005c862a27ba1e511d548716e5fd5cf8c3c6be112dbc54ac0 not found: ID does not exist"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.592137 4812 scope.go:117] "RemoveContainer" containerID="a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5"
Nov 25 17:46:58 crc kubenswrapper[4812]: E1125 17:46:58.592636 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5\": container with ID starting with a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5 not found: ID does not exist" containerID="a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5"
Nov 25 17:46:58 crc kubenswrapper[4812]: I1125 17:46:58.592671 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5"} err="failed to get container status \"a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5\": rpc error: code = NotFound desc = could not find container \"a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5\": container with ID starting with a86eef9c29713154760e614f2f49d8a898278554710dd71612a57101b537e9b5 not found: ID does not exist"
Nov 25 17:46:59 crc kubenswrapper[4812]: I1125 17:46:59.850137 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" path="/var/lib/kubelet/pods/1a515c99-5fb7-460a-bec8-caadf9a053f4/volumes"
Nov 25 17:47:00 crc kubenswrapper[4812]: I1125 17:47:00.197817 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:47:00 crc kubenswrapper[4812]: I1125 17:47:00.200782 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:47:00 crc kubenswrapper[4812]: I1125 17:47:00.200863 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0"
Nov 25 17:47:00 crc kubenswrapper[4812]: I1125 17:47:00.202065 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted"
Nov 25 17:47:00 crc kubenswrapper[4812]: I1125 17:47:00.202157 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" gracePeriod=30
Nov 25 17:47:00 crc kubenswrapper[4812]: I1125 17:47:00.211356 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF"
Nov 25 17:47:02 crc kubenswrapper[4812]: I1125 17:47:02.298433 4812 scope.go:117] "RemoveContainer" containerID="a90acbac79902cdb52ef4fcea9ae5b11639ff143d7714083c5b50dadd7085c1b"
Nov 25 17:47:02 crc kubenswrapper[4812]: I1125 17:47:02.339337 4812 scope.go:117] "RemoveContainer" containerID="099dcb306ebd7860d265853c8073d64c57a696f223cc0e459a6acff1f77c255a"
Nov 25 17:47:03 crc kubenswrapper[4812]: E1125 17:47:03.436401 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:47:03 crc kubenswrapper[4812]: I1125 17:47:03.528809 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" exitCode=0
Nov 25 17:47:03 crc kubenswrapper[4812]: I1125 17:47:03.529204 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"}
Nov 25 17:47:03 crc kubenswrapper[4812]: I1125 17:47:03.529439 4812 scope.go:117] "RemoveContainer" containerID="cc81341589bedf078d8e6c38e6438372a5e8be2fdd7f9bd85d4ec454ce34725e"
Nov 25 17:47:03 crc kubenswrapper[4812]: I1125 17:47:03.530964 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:47:03 crc kubenswrapper[4812]: E1125 17:47:03.531693 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:47:04 crc kubenswrapper[4812]: I1125 17:47:04.833156 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:47:04 crc kubenswrapper[4812]: E1125 17:47:04.833975 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:47:15 crc kubenswrapper[4812]: I1125 17:47:15.840286 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:47:15 crc kubenswrapper[4812]: E1125 17:47:15.841045 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:47:16 crc kubenswrapper[4812]: I1125 17:47:16.832946 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:47:16 crc kubenswrapper[4812]: E1125 17:47:16.833803 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:47:24 crc kubenswrapper[4812]: I1125 17:47:24.068300 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-9ctdm"]
Nov 25 17:47:24 crc kubenswrapper[4812]: I1125 17:47:24.076170 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-9ctdm"]
Nov 25 17:47:25 crc kubenswrapper[4812]: I1125 17:47:25.847043 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8a4c444-a96f-40ee-84ec-4bbd167d11c2" path="/var/lib/kubelet/pods/e8a4c444-a96f-40ee-84ec-4bbd167d11c2/volumes"
Nov 25 17:47:26 crc kubenswrapper[4812]: I1125 17:47:26.831447 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:47:26 crc kubenswrapper[4812]: E1125 17:47:26.832268 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:47:27 crc kubenswrapper[4812]: I1125 17:47:27.332875 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:47:27 crc kubenswrapper[4812]: I1125 17:47:27.333899 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:47:29 crc kubenswrapper[4812]: I1125 17:47:29.831848 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:47:29 crc kubenswrapper[4812]: E1125 17:47:29.832562 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:47:37 crc kubenswrapper[4812]: I1125 17:47:37.831929 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:47:37 crc kubenswrapper[4812]: E1125 17:47:37.833157 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:47:44 crc kubenswrapper[4812]: I1125 17:47:44.831806 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:47:44 crc kubenswrapper[4812]: E1125 17:47:44.834188 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:47:51 crc kubenswrapper[4812]: I1125 17:47:51.832186 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:47:51 crc kubenswrapper[4812]: E1125 17:47:51.833278 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:47:55 crc kubenswrapper[4812]: I1125 17:47:55.837660 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:47:55 crc kubenswrapper[4812]: E1125 17:47:55.838264 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:47:57 crc kubenswrapper[4812]: I1125 17:47:57.332776 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:47:57 crc kubenswrapper[4812]: I1125 17:47:57.332863 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:48:02 crc kubenswrapper[4812]: I1125 17:48:02.462790 4812 scope.go:117] "RemoveContainer" containerID="5cbaf4d0081c590f809a629c02693063b98c4a66896d33258c4b82bae09ddd98"
Nov 25 17:48:06 crc kubenswrapper[4812]: I1125 17:48:06.832138 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:48:06 crc kubenswrapper[4812]: I1125 17:48:06.832790 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:48:06 crc kubenswrapper[4812]: E1125 17:48:06.833001 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:48:06 crc kubenswrapper[4812]: E1125 17:48:06.833188 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:48:17 crc kubenswrapper[4812]: I1125 17:48:17.832567 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:48:17 crc kubenswrapper[4812]: E1125 17:48:17.833804 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:48:18 crc kubenswrapper[4812]: I1125 17:48:18.831944 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:48:18 crc kubenswrapper[4812]: E1125 17:48:18.832634 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.332937 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.333367 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.333441 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx"
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.334577 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.334684 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" gracePeriod=600
Nov 25 17:48:27 crc kubenswrapper[4812]: E1125 17:48:27.459066 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.504346 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" exitCode=0
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.504436 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"}
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.504491 4812 scope.go:117] "RemoveContainer" containerID="0acc40c7b955f4eaa4833a720fa59f543dc276d015523fb120f2e257ee7a126e"
Nov 25 17:48:27 crc kubenswrapper[4812]: I1125 17:48:27.506623 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:48:27 crc kubenswrapper[4812]: E1125 17:48:27.507597 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:48:29 crc kubenswrapper[4812]: I1125 17:48:29.831801 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:48:30 crc kubenswrapper[4812]: I1125 17:48:30.540231 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"}
Nov 25 17:48:32 crc kubenswrapper[4812]: I1125 17:48:32.569401 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" exitCode=1
Nov 25 17:48:32 crc kubenswrapper[4812]: I1125 17:48:32.569462 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"}
Nov 25 17:48:32 crc kubenswrapper[4812]: I1125 17:48:32.570013 4812 scope.go:117] "RemoveContainer" containerID="af9a1c4eb103063dbf8a3589a0292d12e52b5584993c70b3cf1f1f79bcc8a717"
Nov 25 17:48:32 crc kubenswrapper[4812]: I1125 17:48:32.570603 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:48:32 crc kubenswrapper[4812]: E1125 17:48:32.571117 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:48:32 crc kubenswrapper[4812]: I1125 17:48:32.832337 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:48:32 crc kubenswrapper[4812]: E1125 17:48:32.832751 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:48:35 crc kubenswrapper[4812]: I1125 17:48:35.163707 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:48:35 crc kubenswrapper[4812]: I1125 17:48:35.164099 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:48:35 crc kubenswrapper[4812]: I1125 17:48:35.165053 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:48:35 crc kubenswrapper[4812]: E1125 17:48:35.165521 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:48:41 crc kubenswrapper[4812]: I1125 17:48:41.832017 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:48:41 crc kubenswrapper[4812]: E1125 17:48:41.833177 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:48:45 crc kubenswrapper[4812]: I1125 17:48:45.164560 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:48:45 crc kubenswrapper[4812]: I1125 17:48:45.165800 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:48:45 crc kubenswrapper[4812]: E1125 17:48:45.166119 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:48:45 crc kubenswrapper[4812]: I1125 17:48:45.848701 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:48:45 crc kubenswrapper[4812]: E1125 17:48:45.849443 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:48:54 crc kubenswrapper[4812]: I1125 17:48:54.832608 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:48:54 crc kubenswrapper[4812]: E1125 17:48:54.833492 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:48:56 crc kubenswrapper[4812]: I1125 17:48:56.831932 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:48:56 crc kubenswrapper[4812]: E1125 17:48:56.832386 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:48:59 crc kubenswrapper[4812]: I1125 17:48:59.832190 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:48:59 crc kubenswrapper[4812]: E1125 17:48:59.833021 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:49:05 crc kubenswrapper[4812]: I1125 17:49:05.838187 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:49:05 crc kubenswrapper[4812]: E1125 17:49:05.839037 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:49:08 crc kubenswrapper[4812]: I1125 17:49:08.831619 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:49:08 crc kubenswrapper[4812]: E1125 17:49:08.832134 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:49:12 crc kubenswrapper[4812]: I1125 17:49:12.832449 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:49:12 crc kubenswrapper[4812]: E1125 17:49:12.833437 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:49:16 crc kubenswrapper[4812]: I1125 17:49:16.831822 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:49:16 crc kubenswrapper[4812]: E1125 17:49:16.833520 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:49:23 crc kubenswrapper[4812]: I1125 17:49:23.833272 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:49:23 crc kubenswrapper[4812]: E1125 17:49:23.834451 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:49:27 crc kubenswrapper[4812]: I1125 17:49:27.831993 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:49:27 crc kubenswrapper[4812]: E1125 17:49:27.832720 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:49:29 crc kubenswrapper[4812]: I1125 17:49:29.832170 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:49:29 crc kubenswrapper[4812]: E1125 17:49:29.832771 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:49:38 crc kubenswrapper[4812]: I1125 17:49:38.832657 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:49:38 crc kubenswrapper[4812]: E1125 17:49:38.834084 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:49:41 crc kubenswrapper[4812]: I1125 17:49:41.832489 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:49:41 crc kubenswrapper[4812]: E1125 17:49:41.833455 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:49:42 crc kubenswrapper[4812]: I1125 17:49:42.832417 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:49:42 crc kubenswrapper[4812]: E1125 17:49:42.833281 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:49:53 crc kubenswrapper[4812]: I1125 17:49:53.833012 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:49:53 crc kubenswrapper[4812]: I1125 17:49:53.833946 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:49:53 crc kubenswrapper[4812]: E1125 17:49:53.834219 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:49:53 crc kubenswrapper[4812]: E1125 17:49:53.834375 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:49:56 crc kubenswrapper[4812]: I1125 17:49:56.833079 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:49:56 crc kubenswrapper[4812]: E1125 17:49:56.834054 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:50:06 crc kubenswrapper[4812]: I1125 17:50:06.832293 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:50:06 crc kubenswrapper[4812]: E1125 17:50:06.833511 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:50:08 crc kubenswrapper[4812]: I1125 17:50:08.831424 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:50:08 crc kubenswrapper[4812]: E1125 17:50:08.831999 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:50:10 crc kubenswrapper[4812]: I1125 17:50:10.832004 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:50:10 crc kubenswrapper[4812]: E1125 17:50:10.832424 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:50:18 crc kubenswrapper[4812]: I1125 17:50:18.076220 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:50:18 crc kubenswrapper[4812]: E1125 17:50:18.077188 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:50:21 crc kubenswrapper[4812]: I1125 17:50:21.832130 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:50:21 crc kubenswrapper[4812]: I1125 17:50:21.832988 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:50:21 crc kubenswrapper[4812]: E1125 17:50:21.833393 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:50:21 crc kubenswrapper[4812]: E1125 17:50:21.833467 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:50:30 crc kubenswrapper[4812]: I1125 17:50:30.832036 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:50:30 crc kubenswrapper[4812]: E1125 17:50:30.832981 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:50:34 crc kubenswrapper[4812]: I1125 17:50:34.832672 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:50:34 crc kubenswrapper[4812]: E1125 17:50:34.833551 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:50:36 crc kubenswrapper[4812]: I1125 17:50:36.831272 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:50:36 crc kubenswrapper[4812]: E1125 17:50:36.831779 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:50:44 crc kubenswrapper[4812]: I1125 17:50:44.831573 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:50:44 crc kubenswrapper[4812]: E1125 17:50:44.832383 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:50:48 crc kubenswrapper[4812]: I1125 17:50:48.832491 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:50:48 crc kubenswrapper[4812]: E1125 17:50:48.833862 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:50:50 crc kubenswrapper[4812]: I1125 17:50:50.832635 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:50:50 crc kubenswrapper[4812]: E1125 17:50:50.833805 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:50:55 crc kubenswrapper[4812]: I1125 17:50:55.845581 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:50:55 crc kubenswrapper[4812]: E1125 17:50:55.846357 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:51:00 crc kubenswrapper[4812]: I1125 17:51:00.831845 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:51:00 crc kubenswrapper[4812]: E1125 17:51:00.832520 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:51:01 crc kubenswrapper[4812]: I1125 17:51:01.831866 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:51:01 crc kubenswrapper[4812]: E1125 17:51:01.832676 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:51:02 crc kubenswrapper[4812]: I1125 17:51:02.623470 4812 scope.go:117] "RemoveContainer" containerID="0186d788a0d116b360c8eb8c6d4b40c42e0cdbe4cd4b8f485c0c96bddb322cf1"
Nov 25 17:51:02 crc kubenswrapper[4812]: I1125 17:51:02.665054 4812 scope.go:117] "RemoveContainer" containerID="504bb5050064d0f702fc79bfe545ae1df13597562a41e6a023cd54c474932f4a"
Nov 25 17:51:02 crc kubenswrapper[4812]: I1125 17:51:02.724915 4812 scope.go:117] "RemoveContainer" containerID="f9f5d488d807b0c072b7ad5ad4b3a13e905f4872faf5ccdb58ae0dcfb44a8a9e"
Nov 25 17:51:08 crc kubenswrapper[4812]: I1125 17:51:08.831661 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:51:08 crc kubenswrapper[4812]: E1125 17:51:08.832502 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:51:13 crc kubenswrapper[4812]: I1125 17:51:13.832172 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:51:13 crc kubenswrapper[4812]: E1125 17:51:13.833037 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:51:16 crc kubenswrapper[4812]: I1125 17:51:16.831646 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:51:16 crc kubenswrapper[4812]: E1125 17:51:16.832315 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:51:21 crc kubenswrapper[4812]: I1125 17:51:21.832000 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:51:21 crc kubenswrapper[4812]: E1125 17:51:21.832473 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:51:28 crc kubenswrapper[4812]: I1125 17:51:28.831712 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af"
Nov 25 17:51:28 crc kubenswrapper[4812]: E1125 17:51:28.833786 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:51:29 crc kubenswrapper[4812]: I1125 17:51:29.831671 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:51:29 crc kubenswrapper[4812]: E1125 17:51:29.832250 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 17:51:32 crc kubenswrapper[4812]: I1125 17:51:32.831867 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c"
Nov 25 17:51:32 crc kubenswrapper[4812]: E1125 17:51:32.833051 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off
5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:51:40 crc kubenswrapper[4812]: I1125 17:51:40.831813 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" Nov 25 17:51:40 crc kubenswrapper[4812]: E1125 17:51:40.832570 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:51:42 crc kubenswrapper[4812]: I1125 17:51:42.832476 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:51:42 crc kubenswrapper[4812]: E1125 17:51:42.833702 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:51:45 crc kubenswrapper[4812]: I1125 17:51:45.860269 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:51:45 crc kubenswrapper[4812]: E1125 17:51:45.861708 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:51:51 crc kubenswrapper[4812]: I1125 17:51:51.832364 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" Nov 25 17:51:51 crc kubenswrapper[4812]: E1125 17:51:51.833611 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:51:55 crc kubenswrapper[4812]: I1125 17:51:55.844991 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:51:55 crc kubenswrapper[4812]: E1125 17:51:55.845913 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:51:58 crc kubenswrapper[4812]: I1125 17:51:58.831107 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:51:58 crc kubenswrapper[4812]: E1125 17:51:58.831934 4812 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:52:02 crc kubenswrapper[4812]: I1125 17:52:02.832356 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" Nov 25 17:52:02 crc kubenswrapper[4812]: E1125 17:52:02.834349 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:52:09 crc kubenswrapper[4812]: I1125 17:52:09.831583 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:52:09 crc kubenswrapper[4812]: E1125 17:52:09.832346 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:52:11 crc kubenswrapper[4812]: I1125 17:52:11.835846 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:52:11 crc kubenswrapper[4812]: E1125 17:52:11.837257 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:52:15 crc kubenswrapper[4812]: I1125 17:52:15.846124 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" Nov 25 17:52:16 crc kubenswrapper[4812]: I1125 17:52:16.366410 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"2be3ef45138efcad527f289532a21ebeeeea82c6bd69b276033cfe38d2a331f5"} Nov 25 17:52:16 crc kubenswrapper[4812]: I1125 17:52:16.366939 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:52:23 crc kubenswrapper[4812]: I1125 17:52:23.832125 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:52:23 crc kubenswrapper[4812]: I1125 17:52:23.832830 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:52:23 crc kubenswrapper[4812]: E1125 17:52:23.833048 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" 
podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:52:23 crc kubenswrapper[4812]: E1125 17:52:23.833427 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:52:31 crc kubenswrapper[4812]: I1125 17:52:31.306894 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:52:31 crc kubenswrapper[4812]: I1125 17:52:31.464433 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:52:37 crc kubenswrapper[4812]: I1125 17:52:37.831671 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:52:37 crc kubenswrapper[4812]: E1125 17:52:37.832396 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:52:37 crc kubenswrapper[4812]: I1125 17:52:37.832401 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:52:37 crc kubenswrapper[4812]: E1125 17:52:37.832852 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:52:41 crc kubenswrapper[4812]: I1125 17:52:41.090940 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:52:41 crc kubenswrapper[4812]: I1125 17:52:41.130357 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:52:48 crc kubenswrapper[4812]: I1125 17:52:48.832131 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:52:48 crc kubenswrapper[4812]: E1125 17:52:48.832966 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" 
podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:52:50 crc kubenswrapper[4812]: I1125 17:52:50.195636 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:52:50 crc kubenswrapper[4812]: I1125 17:52:50.196294 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:52:50 crc kubenswrapper[4812]: I1125 17:52:50.196399 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:52:50 crc kubenswrapper[4812]: I1125 17:52:50.197369 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"2be3ef45138efcad527f289532a21ebeeeea82c6bd69b276033cfe38d2a331f5"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:52:50 crc kubenswrapper[4812]: I1125 17:52:50.197446 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://2be3ef45138efcad527f289532a21ebeeeea82c6bd69b276033cfe38d2a331f5" gracePeriod=30 Nov 25 17:52:50 crc kubenswrapper[4812]: I1125 17:52:50.205022 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": read tcp 10.217.0.2:39910->10.217.1.1:8786: read: connection reset by peer" Nov 25 17:52:51 crc kubenswrapper[4812]: I1125 17:52:51.831412 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:52:51 crc kubenswrapper[4812]: E1125 17:52:51.831813 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:52:53 crc kubenswrapper[4812]: I1125 17:52:53.773170 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="2be3ef45138efcad527f289532a21ebeeeea82c6bd69b276033cfe38d2a331f5" exitCode=0 Nov 25 17:52:53 crc kubenswrapper[4812]: I1125 17:52:53.773224 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"2be3ef45138efcad527f289532a21ebeeeea82c6bd69b276033cfe38d2a331f5"} Nov 25 17:52:53 crc kubenswrapper[4812]: I1125 17:52:53.773500 4812 scope.go:117] "RemoveContainer" containerID="d8a0f079b413904eb4de30b6e1e84a36ea8b997fb99057143595e540803392af" Nov 25 17:52:54 crc kubenswrapper[4812]: I1125 17:52:54.788092 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" 
event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"} Nov 25 17:52:54 crc kubenswrapper[4812]: I1125 17:52:54.788479 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:53:02 crc kubenswrapper[4812]: I1125 17:53:02.831811 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:53:02 crc kubenswrapper[4812]: E1125 17:53:02.832558 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:53:03 crc kubenswrapper[4812]: I1125 17:53:03.831758 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:53:03 crc kubenswrapper[4812]: E1125 17:53:03.832250 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:53:11 crc kubenswrapper[4812]: I1125 17:53:11.233201 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:53:11 crc kubenswrapper[4812]: I1125 17:53:11.255876 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:53:13 crc kubenswrapper[4812]: I1125 17:53:13.832160 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:53:13 crc kubenswrapper[4812]: E1125 17:53:13.833204 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:53:15 crc kubenswrapper[4812]: I1125 17:53:15.838174 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:53:15 crc kubenswrapper[4812]: E1125 17:53:15.838957 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:53:21 crc kubenswrapper[4812]: I1125 17:53:21.232109 4812 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:53:21 crc kubenswrapper[4812]: I1125 17:53:21.286387 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:53:28 crc kubenswrapper[4812]: I1125 17:53:28.838300 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402" Nov 25 17:53:29 crc kubenswrapper[4812]: I1125 17:53:29.180420 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"065a9508690695f07a5fa4b09908b82a828df3246cb3fdae1d8a1ce83f741172"} Nov 25 17:53:29 crc kubenswrapper[4812]: I1125 17:53:29.834359 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:53:29 crc kubenswrapper[4812]: E1125 17:53:29.835805 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:53:30 crc kubenswrapper[4812]: I1125 17:53:30.231674 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:53:30 crc kubenswrapper[4812]: I1125 17:53:30.232402 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:53:30 crc kubenswrapper[4812]: I1125 17:53:30.233926 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:53:30 crc kubenswrapper[4812]: I1125 17:53:30.233990 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" gracePeriod=30 Nov 25 17:53:30 crc kubenswrapper[4812]: I1125 17:53:30.263969 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:53:33 crc kubenswrapper[4812]: I1125 17:53:33.373990 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": read tcp 10.217.0.2:57168->10.217.1.1:8786: read: connection reset by peer" Nov 25 17:53:33 crc kubenswrapper[4812]: E1125 17:53:33.462453 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:53:34 crc kubenswrapper[4812]: I1125 17:53:34.234450 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" exitCode=0 Nov 25 17:53:34 crc kubenswrapper[4812]: I1125 17:53:34.234565 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"} Nov 25 17:53:34 crc kubenswrapper[4812]: I1125 17:53:34.234893 4812 scope.go:117] "RemoveContainer" containerID="2be3ef45138efcad527f289532a21ebeeeea82c6bd69b276033cfe38d2a331f5" Nov 25 17:53:34 crc kubenswrapper[4812]: I1125 17:53:34.235825 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:53:34 crc kubenswrapper[4812]: E1125 17:53:34.236128 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:53:44 crc kubenswrapper[4812]: I1125 17:53:44.831984 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:53:45 crc kubenswrapper[4812]: I1125 17:53:45.350942 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"} Nov 25 17:53:47 crc kubenswrapper[4812]: I1125 17:53:47.376864 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33" exitCode=1 Nov 25 17:53:47 crc kubenswrapper[4812]: I1125 17:53:47.376913 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"} Nov 25 17:53:47 crc kubenswrapper[4812]: I1125 17:53:47.377586 4812 scope.go:117] "RemoveContainer" containerID="907b1f42477ecd1ab880f54a094faea3e7e9004a0876be6df8920d3cbc7ea89c" Nov 25 17:53:47 crc kubenswrapper[4812]: I1125 17:53:47.379928 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33" Nov 25 17:53:47 crc kubenswrapper[4812]: E1125 17:53:47.381837 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:53:48 crc kubenswrapper[4812]: I1125 17:53:48.832452 4812 scope.go:117] "RemoveContainer" 
containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:53:48 crc kubenswrapper[4812]: E1125 17:53:48.833170 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:53:55 crc kubenswrapper[4812]: I1125 17:53:55.163861 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:53:55 crc kubenswrapper[4812]: I1125 17:53:55.164723 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:53:55 crc kubenswrapper[4812]: I1125 17:53:55.165652 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33" Nov 25 17:53:55 crc kubenswrapper[4812]: E1125 17:53:55.166126 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:54:02 crc kubenswrapper[4812]: I1125 17:54:02.831828 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:54:02 crc kubenswrapper[4812]: E1125 17:54:02.832835 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:54:05 crc kubenswrapper[4812]: I1125 17:54:05.164423 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:54:05 crc kubenswrapper[4812]: I1125 17:54:05.165767 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33" Nov 25 17:54:05 crc kubenswrapper[4812]: E1125 17:54:05.166170 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:54:14 crc kubenswrapper[4812]: I1125 17:54:14.831672 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:54:14 crc kubenswrapper[4812]: E1125 17:54:14.832663 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:54:19 crc kubenswrapper[4812]: I1125 17:54:19.832350 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33" Nov 25 17:54:19 crc 
kubenswrapper[4812]: E1125 17:54:19.833291 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:54:26 crc kubenswrapper[4812]: I1125 17:54:26.831484 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:54:26 crc kubenswrapper[4812]: E1125 17:54:26.832295 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.775098 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-chsxd"] Nov 25 17:54:33 crc kubenswrapper[4812]: E1125 17:54:33.776447 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="extract-utilities" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.776472 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="extract-utilities" Nov 25 17:54:33 crc kubenswrapper[4812]: E1125 17:54:33.776519 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="extract-content" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.776564 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="extract-content" Nov 25 17:54:33 crc kubenswrapper[4812]: E1125 17:54:33.776614 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="registry-server" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.776629 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="registry-server" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.777004 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a515c99-5fb7-460a-bec8-caadf9a053f4" containerName="registry-server" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.779261 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.798286 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-chsxd"] Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.833157 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33" Nov 25 17:54:33 crc kubenswrapper[4812]: E1125 17:54:33.833400 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.902708 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-catalog-content\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.902819 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtr2g\" (UniqueName: \"kubernetes.io/projected/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-kube-api-access-jtr2g\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:33 crc kubenswrapper[4812]: I1125 17:54:33.902860 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-utilities\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.004969 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-catalog-content\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.005057 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtr2g\" (UniqueName: \"kubernetes.io/projected/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-kube-api-access-jtr2g\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.005121 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-utilities\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.006082 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-catalog-content\") pod \"redhat-marketplace-chsxd\" 
(UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.006339 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-utilities\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.028294 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtr2g\" (UniqueName: \"kubernetes.io/projected/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-kube-api-access-jtr2g\") pod \"redhat-marketplace-chsxd\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") " pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.099125 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.545899 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-chsxd"] Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.951420 4812 generic.go:334] "Generic (PLEG): container finished" podID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerID="8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d" exitCode=0 Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.951757 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-chsxd" event={"ID":"e70d3b2e-8a38-4505-97e9-2cf4c378bcff","Type":"ContainerDied","Data":"8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d"} Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.951888 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-chsxd" event={"ID":"e70d3b2e-8a38-4505-97e9-2cf4c378bcff","Type":"ContainerStarted","Data":"c52d2b6920030f48c949741997828d3a82b09c6f6fa99fd519d85c98402f9b4a"} Nov 25 17:54:34 crc kubenswrapper[4812]: I1125 17:54:34.955739 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 17:54:35 crc kubenswrapper[4812]: I1125 17:54:35.983939 4812 generic.go:334] "Generic (PLEG): container finished" podID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerID="3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194" exitCode=0 Nov 25 17:54:35 crc kubenswrapper[4812]: I1125 17:54:35.984664 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-chsxd" event={"ID":"e70d3b2e-8a38-4505-97e9-2cf4c378bcff","Type":"ContainerDied","Data":"3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194"} Nov 25 17:54:36 crc kubenswrapper[4812]: I1125 17:54:36.994613 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-chsxd" event={"ID":"e70d3b2e-8a38-4505-97e9-2cf4c378bcff","Type":"ContainerStarted","Data":"d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e"} Nov 25 17:54:37 crc kubenswrapper[4812]: I1125 17:54:37.011944 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-chsxd" podStartSLOduration=2.536522001 podStartE2EDuration="4.011924646s" podCreationTimestamp="2025-11-25 17:54:33 +0000 UTC" firstStartedPulling="2025-11-25 
17:54:34.95524282 +0000 UTC m=+4049.795384945" lastFinishedPulling="2025-11-25 17:54:36.430645495 +0000 UTC m=+4051.270787590" observedRunningTime="2025-11-25 17:54:37.009249114 +0000 UTC m=+4051.849391209" watchObservedRunningTime="2025-11-25 17:54:37.011924646 +0000 UTC m=+4051.852066741" Nov 25 17:54:38 crc kubenswrapper[4812]: I1125 17:54:38.832248 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:54:38 crc kubenswrapper[4812]: E1125 17:54:38.832730 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.475015 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2zttm"] Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.486012 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.493229 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2zttm"] Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.565899 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbsb9\" (UniqueName: \"kubernetes.io/projected/d92a641e-2595-439b-b9f1-685a9b637115-kube-api-access-pbsb9\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.566739 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-catalog-content\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.566772 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-utilities\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.668457 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbsb9\" (UniqueName: \"kubernetes.io/projected/d92a641e-2595-439b-b9f1-685a9b637115-kube-api-access-pbsb9\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.668579 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-catalog-content\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.668615 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-utilities\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.669179 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-utilities\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.669349 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-catalog-content\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.693455 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbsb9\" (UniqueName: \"kubernetes.io/projected/d92a641e-2595-439b-b9f1-685a9b637115-kube-api-access-pbsb9\") pod \"redhat-operators-2zttm\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") " pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:41 crc kubenswrapper[4812]: I1125 17:54:41.825349 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2zttm" Nov 25 17:54:42 crc kubenswrapper[4812]: I1125 17:54:42.259596 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2zttm"] Nov 25 17:54:43 crc kubenswrapper[4812]: I1125 17:54:43.068264 4812 generic.go:334] "Generic (PLEG): container finished" podID="d92a641e-2595-439b-b9f1-685a9b637115" containerID="6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb" exitCode=0 Nov 25 17:54:43 crc kubenswrapper[4812]: I1125 17:54:43.068325 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerDied","Data":"6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb"} Nov 25 17:54:43 crc kubenswrapper[4812]: I1125 17:54:43.068567 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerStarted","Data":"83de964f7fc462a3cab1dd1307697abc15bada73a273724c41a9e643ef21c645"} Nov 25 17:54:44 crc kubenswrapper[4812]: I1125 17:54:44.088488 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerStarted","Data":"3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e"} Nov 25 17:54:44 crc kubenswrapper[4812]: I1125 17:54:44.099806 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:44 crc kubenswrapper[4812]: I1125 17:54:44.100017 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:44 crc kubenswrapper[4812]: I1125 17:54:44.167700 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-chsxd" Nov 25 17:54:45 crc 
kubenswrapper[4812]: I1125 17:54:45.108378 4812 generic.go:334] "Generic (PLEG): container finished" podID="d92a641e-2595-439b-b9f1-685a9b637115" containerID="3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e" exitCode=0
Nov 25 17:54:45 crc kubenswrapper[4812]: I1125 17:54:45.108493 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerDied","Data":"3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e"}
Nov 25 17:54:45 crc kubenswrapper[4812]: I1125 17:54:45.182266 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-chsxd"
Nov 25 17:54:46 crc kubenswrapper[4812]: I1125 17:54:46.124357 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerStarted","Data":"baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817"}
Nov 25 17:54:46 crc kubenswrapper[4812]: I1125 17:54:46.154014 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2zttm" podStartSLOduration=2.352032741 podStartE2EDuration="5.15399727s" podCreationTimestamp="2025-11-25 17:54:41 +0000 UTC" firstStartedPulling="2025-11-25 17:54:43.070678195 +0000 UTC m=+4057.910820290" lastFinishedPulling="2025-11-25 17:54:45.872642684 +0000 UTC m=+4060.712784819" observedRunningTime="2025-11-25 17:54:46.147767841 +0000 UTC m=+4060.987910016" watchObservedRunningTime="2025-11-25 17:54:46.15399727 +0000 UTC m=+4060.994139365"
Nov 25 17:54:46 crc kubenswrapper[4812]: I1125 17:54:46.425863 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-chsxd"]
Nov 25 17:54:46 crc kubenswrapper[4812]: I1125 17:54:46.832372 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:54:46 crc kubenswrapper[4812]: E1125 17:54:46.833446 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.146692 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-chsxd" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="registry-server" containerID="cri-o://d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e" gracePeriod=2
Nov 25 17:54:48 crc kubenswrapper[4812]: E1125 17:54:48.322211 4812 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode70d3b2e_8a38_4505_97e9_2cf4c378bcff.slice/crio-conmon-d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode70d3b2e_8a38_4505_97e9_2cf4c378bcff.slice/crio-d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.623548 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-chsxd"
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.815634 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtr2g\" (UniqueName: \"kubernetes.io/projected/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-kube-api-access-jtr2g\") pod \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") "
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.816141 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-utilities\") pod \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") "
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.816301 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-catalog-content\") pod \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\" (UID: \"e70d3b2e-8a38-4505-97e9-2cf4c378bcff\") "
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.817792 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-utilities" (OuterVolumeSpecName: "utilities") pod "e70d3b2e-8a38-4505-97e9-2cf4c378bcff" (UID: "e70d3b2e-8a38-4505-97e9-2cf4c378bcff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.829754 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-kube-api-access-jtr2g" (OuterVolumeSpecName: "kube-api-access-jtr2g") pod "e70d3b2e-8a38-4505-97e9-2cf4c378bcff" (UID: "e70d3b2e-8a38-4505-97e9-2cf4c378bcff"). InnerVolumeSpecName "kube-api-access-jtr2g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.841582 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e70d3b2e-8a38-4505-97e9-2cf4c378bcff" (UID: "e70d3b2e-8a38-4505-97e9-2cf4c378bcff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.919244 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtr2g\" (UniqueName: \"kubernetes.io/projected/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-kube-api-access-jtr2g\") on node \"crc\" DevicePath \"\""
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.919281 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:54:48 crc kubenswrapper[4812]: I1125 17:54:48.919297 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e70d3b2e-8a38-4505-97e9-2cf4c378bcff-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.161035 4812 generic.go:334] "Generic (PLEG): container finished" podID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerID="d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e" exitCode=0
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.161097 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-chsxd" event={"ID":"e70d3b2e-8a38-4505-97e9-2cf4c378bcff","Type":"ContainerDied","Data":"d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e"}
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.161137 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-chsxd" event={"ID":"e70d3b2e-8a38-4505-97e9-2cf4c378bcff","Type":"ContainerDied","Data":"c52d2b6920030f48c949741997828d3a82b09c6f6fa99fd519d85c98402f9b4a"}
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.161137 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-chsxd"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.161159 4812 scope.go:117] "RemoveContainer" containerID="d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.184282 4812 scope.go:117] "RemoveContainer" containerID="3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.211303 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-chsxd"]
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.223944 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-chsxd"]
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.245579 4812 scope.go:117] "RemoveContainer" containerID="8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.305780 4812 scope.go:117] "RemoveContainer" containerID="d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e"
Nov 25 17:54:49 crc kubenswrapper[4812]: E1125 17:54:49.307023 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e\": container with ID starting with d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e not found: ID does not exist" containerID="d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.307082 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e"} err="failed to get container status \"d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e\": rpc error: code = NotFound desc = could not find container \"d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e\": container with ID starting with d2d79963adfa1b1e863554c729df6f2d9cc49c54e8671c700aec828978be084e not found: ID does not exist"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.307115 4812 scope.go:117] "RemoveContainer" containerID="3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194"
Nov 25 17:54:49 crc kubenswrapper[4812]: E1125 17:54:49.307813 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194\": container with ID starting with 3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194 not found: ID does not exist" containerID="3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.307839 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194"} err="failed to get container status \"3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194\": rpc error: code = NotFound desc = could not find container \"3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194\": container with ID starting with 3d360b8c60ef4abe3578c75091191b417674b13414bb0e7c12008f176487a194 not found: ID does not exist"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.307857 4812 scope.go:117] "RemoveContainer" containerID="8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d"
Nov 25 17:54:49 crc kubenswrapper[4812]: E1125 17:54:49.308463 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d\": container with ID starting with 8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d not found: ID does not exist" containerID="8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.308565 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d"} err="failed to get container status \"8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d\": rpc error: code = NotFound desc = could not find container \"8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d\": container with ID starting with 8bd50bf12c13e1b866695c45f94fbcf61e67a3cab65126dc48d7f9ffa13cf54d not found: ID does not exist"
Nov 25 17:54:49 crc kubenswrapper[4812]: I1125 17:54:49.847957 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" path="/var/lib/kubelet/pods/e70d3b2e-8a38-4505-97e9-2cf4c378bcff/volumes"
Nov 25 17:54:51 crc kubenswrapper[4812]: I1125 17:54:51.825584 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2zttm"
Nov 25 17:54:51 crc kubenswrapper[4812]: I1125 17:54:51.825975 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2zttm"
Nov 25 17:54:51 crc kubenswrapper[4812]: I1125 17:54:51.886327 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2zttm"
Nov 25 17:54:52 crc kubenswrapper[4812]: I1125 17:54:52.260627 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2zttm"
Nov 25 17:54:52 crc kubenswrapper[4812]: I1125 17:54:52.832884 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:54:52 crc kubenswrapper[4812]: E1125 17:54:52.833452 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:54:53 crc kubenswrapper[4812]: I1125 17:54:53.245065 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2zttm"]
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.236661 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2zttm" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="registry-server" containerID="cri-o://baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817" gracePeriod=2
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.746158 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2zttm"
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.871768 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-catalog-content\") pod \"d92a641e-2595-439b-b9f1-685a9b637115\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") "
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.871871 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-utilities\") pod \"d92a641e-2595-439b-b9f1-685a9b637115\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") "
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.871992 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbsb9\" (UniqueName: \"kubernetes.io/projected/d92a641e-2595-439b-b9f1-685a9b637115-kube-api-access-pbsb9\") pod \"d92a641e-2595-439b-b9f1-685a9b637115\" (UID: \"d92a641e-2595-439b-b9f1-685a9b637115\") "
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.872904 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-utilities" (OuterVolumeSpecName: "utilities") pod "d92a641e-2595-439b-b9f1-685a9b637115" (UID: "d92a641e-2595-439b-b9f1-685a9b637115"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.878130 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d92a641e-2595-439b-b9f1-685a9b637115-kube-api-access-pbsb9" (OuterVolumeSpecName: "kube-api-access-pbsb9") pod "d92a641e-2595-439b-b9f1-685a9b637115" (UID: "d92a641e-2595-439b-b9f1-685a9b637115"). InnerVolumeSpecName "kube-api-access-pbsb9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.975964 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbsb9\" (UniqueName: \"kubernetes.io/projected/d92a641e-2595-439b-b9f1-685a9b637115-kube-api-access-pbsb9\") on node \"crc\" DevicePath \"\""
Nov 25 17:54:54 crc kubenswrapper[4812]: I1125 17:54:54.976598 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.252844 4812 generic.go:334] "Generic (PLEG): container finished" podID="d92a641e-2595-439b-b9f1-685a9b637115" containerID="baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817" exitCode=0
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.252924 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerDied","Data":"baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817"}
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.252947 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2zttm"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.252975 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2zttm" event={"ID":"d92a641e-2595-439b-b9f1-685a9b637115","Type":"ContainerDied","Data":"83de964f7fc462a3cab1dd1307697abc15bada73a273724c41a9e643ef21c645"}
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.253021 4812 scope.go:117] "RemoveContainer" containerID="baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.282803 4812 scope.go:117] "RemoveContainer" containerID="3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.307100 4812 scope.go:117] "RemoveContainer" containerID="6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.378006 4812 scope.go:117] "RemoveContainer" containerID="baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817"
Nov 25 17:54:55 crc kubenswrapper[4812]: E1125 17:54:55.378571 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817\": container with ID starting with baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817 not found: ID does not exist" containerID="baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.378608 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817"} err="failed to get container status \"baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817\": rpc error: code = NotFound desc = could not find container \"baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817\": container with ID starting with baa0e191c67a7bcb2891c91dc530ebc9129dfbb24e6a1ff97139b5d47463f817 not found: ID does not exist"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.378635 4812 scope.go:117] "RemoveContainer" containerID="3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e"
Nov 25 17:54:55 crc kubenswrapper[4812]: E1125 17:54:55.378917 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e\": container with ID starting with 3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e not found: ID does not exist" containerID="3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.378974 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e"} err="failed to get container status \"3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e\": rpc error: code = NotFound desc = could not find container \"3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e\": container with ID starting with 3bca287664b04ac5000c5628de317bfbd17e8e45dc86ae607d8c8a50a5c50e8e not found: ID does not exist"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.378996 4812 scope.go:117] "RemoveContainer" containerID="6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb"
Nov 25 17:54:55 crc kubenswrapper[4812]: E1125 17:54:55.379275 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb\": container with ID starting with 6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb not found: ID does not exist" containerID="6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb"
Nov 25 17:54:55 crc kubenswrapper[4812]: I1125 17:54:55.379324 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb"} err="failed to get container status \"6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb\": rpc error: code = NotFound desc = could not find container \"6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb\": container with ID starting with 6d71dcdb742c6011abce6021c51848ff316883af6163750a31ec55103908d1eb not found: ID does not exist"
Nov 25 17:54:56 crc kubenswrapper[4812]: I1125 17:54:56.041775 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d92a641e-2595-439b-b9f1-685a9b637115" (UID: "d92a641e-2595-439b-b9f1-685a9b637115"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:54:56 crc kubenswrapper[4812]: I1125 17:54:56.102922 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d92a641e-2595-439b-b9f1-685a9b637115-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:54:56 crc kubenswrapper[4812]: I1125 17:54:56.193130 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2zttm"]
Nov 25 17:54:56 crc kubenswrapper[4812]: I1125 17:54:56.205708 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2zttm"]
Nov 25 17:54:57 crc kubenswrapper[4812]: I1125 17:54:57.833150 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:54:57 crc kubenswrapper[4812]: E1125 17:54:57.834565 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:54:57 crc kubenswrapper[4812]: I1125 17:54:57.850925 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d92a641e-2595-439b-b9f1-685a9b637115" path="/var/lib/kubelet/pods/d92a641e-2595-439b-b9f1-685a9b637115/volumes"
Nov 25 17:55:04 crc kubenswrapper[4812]: I1125 17:55:04.832695 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:55:04 crc kubenswrapper[4812]: E1125 17:55:04.833722 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:55:09 crc kubenswrapper[4812]: I1125 17:55:09.831408 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:55:09 crc kubenswrapper[4812]: E1125 17:55:09.832316 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:55:18 crc kubenswrapper[4812]: I1125 17:55:18.831055 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:55:18 crc kubenswrapper[4812]: E1125 17:55:18.831819 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:55:23 crc kubenswrapper[4812]: I1125 17:55:23.832061 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:55:23 crc kubenswrapper[4812]: E1125 17:55:23.833332 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:55:29 crc kubenswrapper[4812]: I1125 17:55:29.832935 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:55:29 crc kubenswrapper[4812]: E1125 17:55:29.834110 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:55:34 crc kubenswrapper[4812]: I1125 17:55:34.832563 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:55:34 crc kubenswrapper[4812]: E1125 17:55:34.834877 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:55:40 crc kubenswrapper[4812]: I1125 17:55:40.831742 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:55:40 crc kubenswrapper[4812]: E1125 17:55:40.832666 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:55:48 crc kubenswrapper[4812]: I1125 17:55:48.832603 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:55:48 crc kubenswrapper[4812]: E1125 17:55:48.833430 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:55:53 crc kubenswrapper[4812]: I1125 17:55:53.832220 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:55:53 crc kubenswrapper[4812]: E1125 17:55:53.833235 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:55:57 crc kubenswrapper[4812]: I1125 17:55:57.333520 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:55:57 crc kubenswrapper[4812]: I1125 17:55:57.333953 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:55:59 crc kubenswrapper[4812]: I1125 17:55:59.831678 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:55:59 crc kubenswrapper[4812]: E1125 17:55:59.833314 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:56:08 crc kubenswrapper[4812]: I1125 17:56:08.832519 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:56:08 crc kubenswrapper[4812]: E1125 17:56:08.833372 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:56:10 crc kubenswrapper[4812]: I1125 17:56:10.832277 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:56:10 crc kubenswrapper[4812]: E1125 17:56:10.832964 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:56:22 crc kubenswrapper[4812]: I1125 17:56:22.832511 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:56:22 crc kubenswrapper[4812]: E1125 17:56:22.833616 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:56:25 crc kubenswrapper[4812]: I1125 17:56:25.845038 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:56:25 crc kubenswrapper[4812]: E1125 17:56:25.845738 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:56:27 crc kubenswrapper[4812]: I1125 17:56:27.332623 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:56:27 crc kubenswrapper[4812]: I1125 17:56:27.333063 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:56:35 crc kubenswrapper[4812]: I1125 17:56:35.842139 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:56:35 crc kubenswrapper[4812]: E1125 17:56:35.842927 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:56:38 crc kubenswrapper[4812]: I1125 17:56:38.831917 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:56:38 crc kubenswrapper[4812]: E1125 17:56:38.832973 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:56:46 crc kubenswrapper[4812]: I1125 17:56:46.832014 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:56:46 crc kubenswrapper[4812]: E1125 17:56:46.833912 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:56:51 crc kubenswrapper[4812]: I1125 17:56:51.832687 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:56:51 crc kubenswrapper[4812]: E1125 17:56:51.833269 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.332693 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.333510 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.333578 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx"
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.334311 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"065a9508690695f07a5fa4b09908b82a828df3246cb3fdae1d8a1ce83f741172"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.334363 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://065a9508690695f07a5fa4b09908b82a828df3246cb3fdae1d8a1ce83f741172" gracePeriod=600
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.698465 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="065a9508690695f07a5fa4b09908b82a828df3246cb3fdae1d8a1ce83f741172" exitCode=0
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.698504 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"065a9508690695f07a5fa4b09908b82a828df3246cb3fdae1d8a1ce83f741172"}
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.698802 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"}
Nov 25 17:56:57 crc kubenswrapper[4812]: I1125 17:56:57.698825 4812 scope.go:117] "RemoveContainer" containerID="8752772c4c7b9e552da7d3e8bd64ef561b7ab0875c65565697c2b0831f94b402"
Nov 25 17:57:00 crc kubenswrapper[4812]: I1125 17:57:00.831703 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:57:00 crc kubenswrapper[4812]: E1125 17:57:00.832579 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:57:03 crc kubenswrapper[4812]: I1125 17:57:03.832418 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:57:03 crc kubenswrapper[4812]: E1125 17:57:03.833664 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:57:13 crc kubenswrapper[4812]: I1125 17:57:13.832914 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:57:13 crc kubenswrapper[4812]: E1125 17:57:13.834083 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:57:17 crc kubenswrapper[4812]: I1125 17:57:17.832084 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:57:17 crc kubenswrapper[4812]: E1125 17:57:17.835187 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:57:25 crc kubenswrapper[4812]: I1125 17:57:25.832498 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:57:25 crc kubenswrapper[4812]: E1125 17:57:25.833602 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:57:32 crc kubenswrapper[4812]: I1125 17:57:32.831942 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:57:32 crc kubenswrapper[4812]: E1125 17:57:32.832804 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:57:40 crc kubenswrapper[4812]: I1125 17:57:40.832795 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:57:40 crc kubenswrapper[4812]: E1125 17:57:40.834028 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:57:43 crc kubenswrapper[4812]: I1125 17:57:43.832167 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:57:43 crc kubenswrapper[4812]: E1125 17:57:43.833131 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:57:55 crc kubenswrapper[4812]: I1125 17:57:55.843462 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:57:55 crc kubenswrapper[4812]: E1125 17:57:55.844571 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:57:56 crc kubenswrapper[4812]: I1125 17:57:56.833203 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:57:56 crc kubenswrapper[4812]: E1125 17:57:56.834081 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:58:09 crc kubenswrapper[4812]: I1125 17:58:09.832260 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:58:09 crc kubenswrapper[4812]: E1125 17:58:09.833343 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:58:11 crc kubenswrapper[4812]: I1125 17:58:11.831981 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:58:11 crc kubenswrapper[4812]: E1125 17:58:11.832637 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.251383 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p58qb"]
Nov 25 17:58:14 crc kubenswrapper[4812]: E1125 17:58:14.252144 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="registry-server"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252162 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="registry-server"
Nov 25 17:58:14 crc kubenswrapper[4812]: E1125 17:58:14.252194 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="registry-server"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252205 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="registry-server"
Nov 25 17:58:14 crc kubenswrapper[4812]: E1125 17:58:14.252227 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="extract-utilities"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252236 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="extract-utilities"
Nov 25 17:58:14 crc kubenswrapper[4812]: E1125 17:58:14.252264 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="extract-utilities"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252273 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="extract-utilities"
Nov 25 17:58:14 crc kubenswrapper[4812]: E1125 17:58:14.252292 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="extract-content"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252301 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="extract-content"
Nov 25 17:58:14 crc kubenswrapper[4812]: E1125 17:58:14.252321 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="extract-content"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252331 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="extract-content"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252787 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="e70d3b2e-8a38-4505-97e9-2cf4c378bcff" containerName="registry-server"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.252825 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d92a641e-2595-439b-b9f1-685a9b637115" containerName="registry-server"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.254722 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.280455 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p58qb"]
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.296986 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-utilities\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.297204 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glhq9\" (UniqueName: \"kubernetes.io/projected/d9da3b4f-0914-49b5-9f8c-e5fee6848226-kube-api-access-glhq9\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.297244 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-catalog-content\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.399063 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glhq9\" (UniqueName: \"kubernetes.io/projected/d9da3b4f-0914-49b5-9f8c-e5fee6848226-kube-api-access-glhq9\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.399115 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-catalog-content\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.399199 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-utilities\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.399709 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-utilities\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.399903 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-catalog-content\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.425761 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glhq9\" (UniqueName: \"kubernetes.io/projected/d9da3b4f-0914-49b5-9f8c-e5fee6848226-kube-api-access-glhq9\") pod \"community-operators-p58qb\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") " pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:14 crc kubenswrapper[4812]: I1125 17:58:14.577776 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:15 crc kubenswrapper[4812]: I1125 17:58:15.144595 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p58qb"]
Nov 25 17:58:15 crc kubenswrapper[4812]: I1125 17:58:15.557222 4812 generic.go:334] "Generic (PLEG): container finished" podID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerID="b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37" exitCode=0
Nov 25 17:58:15 crc kubenswrapper[4812]: I1125 17:58:15.557270 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p58qb" event={"ID":"d9da3b4f-0914-49b5-9f8c-e5fee6848226","Type":"ContainerDied","Data":"b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37"}
Nov 25 17:58:15 crc kubenswrapper[4812]: I1125 17:58:15.557456 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p58qb" event={"ID":"d9da3b4f-0914-49b5-9f8c-e5fee6848226","Type":"ContainerStarted","Data":"e9b9e568a2f1516de617c2ec97b663aa636283417b4e3d856980e6e040a99ceb"}
Nov 25 17:58:18 crc kubenswrapper[4812]: I1125 17:58:18.717614 4812 generic.go:334] "Generic (PLEG): container finished" podID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerID="319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a" exitCode=0
Nov 25 17:58:18 crc kubenswrapper[4812]: I1125 17:58:18.717691 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p58qb" event={"ID":"d9da3b4f-0914-49b5-9f8c-e5fee6848226","Type":"ContainerDied","Data":"319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a"}
Nov 25 17:58:19 crc kubenswrapper[4812]: I1125 17:58:19.727145 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p58qb" event={"ID":"d9da3b4f-0914-49b5-9f8c-e5fee6848226","Type":"ContainerStarted","Data":"e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557"}
Nov 25 17:58:19 crc kubenswrapper[4812]: I1125 17:58:19.760201 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p58qb" podStartSLOduration=2.184299541 podStartE2EDuration="5.760176076s" podCreationTimestamp="2025-11-25 17:58:14 +0000 UTC" firstStartedPulling="2025-11-25 17:58:15.559549187 +0000 UTC m=+4270.399691282" lastFinishedPulling="2025-11-25 17:58:19.135425702 +0000 UTC m=+4273.975567817" observedRunningTime="2025-11-25 17:58:19.757101993 +0000 UTC m=+4274.597244148" watchObservedRunningTime="2025-11-25 17:58:19.760176076 +0000 UTC m=+4274.600318171"
Nov 25 17:58:20 crc kubenswrapper[4812]: I1125 17:58:20.831379 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:58:20 crc kubenswrapper[4812]: E1125 17:58:20.831963 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 17:58:23 crc kubenswrapper[4812]: I1125 17:58:23.833587 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:58:23 crc kubenswrapper[4812]: E1125 17:58:23.834194 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:58:24 crc kubenswrapper[4812]: I1125 17:58:24.579567 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:24 crc kubenswrapper[4812]: I1125 17:58:24.580030 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:24 crc kubenswrapper[4812]: I1125 17:58:24.678456 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:24 crc kubenswrapper[4812]: I1125 17:58:24.822517 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:24 crc kubenswrapper[4812]: I1125 17:58:24.920158 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p58qb"]
Nov 25 17:58:26 crc kubenswrapper[4812]: I1125 17:58:26.792129 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-p58qb" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="registry-server" containerID="cri-o://e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557" gracePeriod=2
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.320043 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.395383 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-utilities\") pod \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") "
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.395488 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-catalog-content\") pod \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") "
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.395589 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glhq9\" (UniqueName: \"kubernetes.io/projected/d9da3b4f-0914-49b5-9f8c-e5fee6848226-kube-api-access-glhq9\") pod \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\" (UID: \"d9da3b4f-0914-49b5-9f8c-e5fee6848226\") "
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.396585 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-utilities" (OuterVolumeSpecName: "utilities") pod "d9da3b4f-0914-49b5-9f8c-e5fee6848226" (UID: "d9da3b4f-0914-49b5-9f8c-e5fee6848226"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.406416 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9da3b4f-0914-49b5-9f8c-e5fee6848226-kube-api-access-glhq9" (OuterVolumeSpecName: "kube-api-access-glhq9") pod "d9da3b4f-0914-49b5-9f8c-e5fee6848226" (UID: "d9da3b4f-0914-49b5-9f8c-e5fee6848226"). InnerVolumeSpecName "kube-api-access-glhq9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.446868 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d9da3b4f-0914-49b5-9f8c-e5fee6848226" (UID: "d9da3b4f-0914-49b5-9f8c-e5fee6848226"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.498522 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.498580 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d9da3b4f-0914-49b5-9f8c-e5fee6848226-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.498597 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glhq9\" (UniqueName: \"kubernetes.io/projected/d9da3b4f-0914-49b5-9f8c-e5fee6848226-kube-api-access-glhq9\") on node \"crc\" DevicePath \"\""
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.802150 4812 generic.go:334] "Generic (PLEG): container finished" podID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerID="e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557" exitCode=0
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.802208 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p58qb"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.802199 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p58qb" event={"ID":"d9da3b4f-0914-49b5-9f8c-e5fee6848226","Type":"ContainerDied","Data":"e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557"}
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.802711 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p58qb" event={"ID":"d9da3b4f-0914-49b5-9f8c-e5fee6848226","Type":"ContainerDied","Data":"e9b9e568a2f1516de617c2ec97b663aa636283417b4e3d856980e6e040a99ceb"}
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.802735 4812 scope.go:117] "RemoveContainer" containerID="e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.840800 4812 scope.go:117] "RemoveContainer" containerID="319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.849773 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p58qb"]
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.859447 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-p58qb"]
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.869331 4812 scope.go:117] "RemoveContainer" containerID="b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.907382 4812 scope.go:117] "RemoveContainer" containerID="e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557"
Nov 25 17:58:27 crc kubenswrapper[4812]: E1125 17:58:27.907768 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557\": container with ID starting with e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557 not found: ID does not exist" containerID="e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.907806 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557"} err="failed to get container status \"e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557\": rpc error: code = NotFound desc = could not find container \"e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557\": container with ID starting with e2f27e18114eaae3ebec59a92f29b6ac88483c3b8eb6d221daab8b3b03fd6557 not found: ID does not exist"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.907952 4812 scope.go:117] "RemoveContainer" containerID="319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a"
Nov 25 17:58:27 crc kubenswrapper[4812]: E1125 17:58:27.908462 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a\": container with ID starting with 319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a not found: ID does not exist" containerID="319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.908496 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a"} err="failed to get container status \"319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a\": rpc error: code = NotFound desc = could not find container \"319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a\": container with ID starting with 319247db19a3d2c3c7fc8edd443129ce633fcc61fcec47070bb502079f0f2e0a not found: ID does not exist"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.908512 4812 scope.go:117] "RemoveContainer" containerID="b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37"
Nov 25 17:58:27 crc kubenswrapper[4812]: E1125 17:58:27.908812 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37\": container with ID starting with b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37 not found: ID does not exist" containerID="b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37"
Nov 25 17:58:27 crc kubenswrapper[4812]: I1125 17:58:27.908845 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37"} err="failed to get container status \"b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37\": rpc error: code = NotFound desc = could not find container \"b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37\": container with ID starting with b9eaf12ac27bf3d8b74af2e02402794742ab43c32916e8227a0d2befae907b37 not found: ID does not exist"
Nov 25 17:58:29 crc kubenswrapper[4812]: I1125 17:58:29.844292 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" path="/var/lib/kubelet/pods/d9da3b4f-0914-49b5-9f8c-e5fee6848226/volumes"
Nov 25 17:58:34 crc kubenswrapper[4812]: I1125 17:58:34.831809 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454"
Nov 25 17:58:35 crc kubenswrapper[4812]: I1125 17:58:35.892628 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"f115d855d5a372ea2fabe38699d4b10232eea8be19343a3ade5352be863151ba"}
Nov 25 17:58:35 crc kubenswrapper[4812]: I1125 17:58:35.893714 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0"
Nov 25 17:58:37 crc kubenswrapper[4812]: I1125 17:58:37.832604 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:58:37 crc kubenswrapper[4812]: E1125 17:58:37.833468 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:58:48 crc kubenswrapper[4812]: I1125 17:58:48.832103 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:58:50 crc kubenswrapper[4812]: I1125 17:58:50.031566 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"}
Nov 25 17:58:51 crc kubenswrapper[4812]: I1125 17:58:51.043094 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" exitCode=1
Nov 25 17:58:51 crc kubenswrapper[4812]: I1125 17:58:51.043133 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"}
Nov 25 17:58:51 crc kubenswrapper[4812]: I1125 17:58:51.043212 4812 scope.go:117] "RemoveContainer" containerID="98ad524e4fa76dbd5db0b77f4f3172a868bd73317b3f452a42d17221f9caab33"
Nov 25 17:58:51 crc kubenswrapper[4812]: I1125 17:58:51.063026 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 17:58:51 crc kubenswrapper[4812]: E1125 17:58:51.064193 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 17:58:51 crc kubenswrapper[4812]: I1125 17:58:51.204003 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:58:51 crc kubenswrapper[4812]: I1125 17:58:51.251333 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 17:58:55 crc kubenswrapper[4812]: I1125 17:58:55.164177 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 17:58:55 crc
kubenswrapper[4812]: I1125 17:58:55.165810 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:58:55 crc kubenswrapper[4812]: I1125 17:58:55.167202 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:58:55 crc kubenswrapper[4812]: E1125 17:58:55.167589 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:58:56 crc kubenswrapper[4812]: I1125 17:58:56.112745 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:58:56 crc kubenswrapper[4812]: E1125 17:58:56.113586 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:58:57 crc kubenswrapper[4812]: I1125 17:58:57.333185 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:58:57 crc kubenswrapper[4812]: I1125 17:58:57.333872 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:59:01 crc kubenswrapper[4812]: I1125 17:59:01.163072 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:01 crc kubenswrapper[4812]: I1125 17:59:01.206267 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:05 crc kubenswrapper[4812]: I1125 17:59:05.164409 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 17:59:05 crc kubenswrapper[4812]: I1125 17:59:05.166361 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:59:05 crc kubenswrapper[4812]: E1125 17:59:05.166986 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:59:10 crc kubenswrapper[4812]: I1125 17:59:10.197842 4812 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:10 crc kubenswrapper[4812]: I1125 17:59:10.201263 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:10 crc kubenswrapper[4812]: I1125 17:59:10.201324 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:59:10 crc kubenswrapper[4812]: I1125 17:59:10.202143 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"f115d855d5a372ea2fabe38699d4b10232eea8be19343a3ade5352be863151ba"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:59:10 crc kubenswrapper[4812]: I1125 17:59:10.202190 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://f115d855d5a372ea2fabe38699d4b10232eea8be19343a3ade5352be863151ba" gracePeriod=30 Nov 25 17:59:10 crc kubenswrapper[4812]: I1125 17:59:10.209824 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:59:14 crc kubenswrapper[4812]: I1125 17:59:14.296267 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="f115d855d5a372ea2fabe38699d4b10232eea8be19343a3ade5352be863151ba" exitCode=0 Nov 25 17:59:14 crc kubenswrapper[4812]: I1125 17:59:14.296429 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"f115d855d5a372ea2fabe38699d4b10232eea8be19343a3ade5352be863151ba"} Nov 25 17:59:14 crc kubenswrapper[4812]: I1125 17:59:14.296972 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"} Nov 25 17:59:14 crc kubenswrapper[4812]: I1125 17:59:14.297005 4812 scope.go:117] "RemoveContainer" containerID="117cdbf49a8d337558287c76d66fbfb9c8293bf1bb8907ad48cf2c0cf9b9d454" Nov 25 17:59:14 crc kubenswrapper[4812]: I1125 17:59:14.297656 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 17:59:16 crc kubenswrapper[4812]: I1125 17:59:16.831888 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:59:16 crc kubenswrapper[4812]: E1125 17:59:16.832668 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:59:27 crc kubenswrapper[4812]: I1125 17:59:27.332745 4812 patch_prober.go:28] interesting 
pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:59:27 crc kubenswrapper[4812]: I1125 17:59:27.333424 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:59:30 crc kubenswrapper[4812]: I1125 17:59:30.832314 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:59:30 crc kubenswrapper[4812]: E1125 17:59:30.833301 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:59:31 crc kubenswrapper[4812]: I1125 17:59:31.183755 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:31 crc kubenswrapper[4812]: I1125 17:59:31.253277 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:41 crc kubenswrapper[4812]: I1125 17:59:41.135612 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:41 crc kubenswrapper[4812]: I1125 17:59:41.152026 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:44 crc kubenswrapper[4812]: I1125 17:59:44.832171 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:59:44 crc kubenswrapper[4812]: E1125 17:59:44.833554 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 17:59:50 crc kubenswrapper[4812]: I1125 17:59:50.193947 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 17:59:50 crc kubenswrapper[4812]: I1125 17:59:50.194102 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe 
failed with statuscode: 500" Nov 25 17:59:50 crc kubenswrapper[4812]: I1125 17:59:50.194594 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 17:59:50 crc kubenswrapper[4812]: I1125 17:59:50.195608 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 17:59:50 crc kubenswrapper[4812]: I1125 17:59:50.195659 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" gracePeriod=30 Nov 25 17:59:50 crc kubenswrapper[4812]: I1125 17:59:50.202343 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 17:59:53 crc kubenswrapper[4812]: E1125 17:59:53.425066 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:59:53 crc kubenswrapper[4812]: I1125 17:59:53.652460 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" exitCode=0 Nov 25 17:59:53 crc kubenswrapper[4812]: I1125 17:59:53.652515 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"} Nov 25 17:59:53 crc kubenswrapper[4812]: I1125 17:59:53.652894 4812 scope.go:117] "RemoveContainer" containerID="f115d855d5a372ea2fabe38699d4b10232eea8be19343a3ade5352be863151ba" Nov 25 17:59:53 crc kubenswrapper[4812]: I1125 17:59:53.654617 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 17:59:53 crc kubenswrapper[4812]: E1125 17:59:53.654903 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.333291 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.333896 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.333947 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.334796 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.334853 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" gracePeriod=600 Nov 25 17:59:57 crc kubenswrapper[4812]: E1125 17:59:57.485464 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.704106 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" exitCode=0 Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.704171 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"} Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.704208 4812 scope.go:117] "RemoveContainer" containerID="065a9508690695f07a5fa4b09908b82a828df3246cb3fdae1d8a1ce83f741172" Nov 25 17:59:57 crc kubenswrapper[4812]: I1125 17:59:57.705454 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 17:59:57 crc kubenswrapper[4812]: E1125 17:59:57.705950 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 17:59:58 crc kubenswrapper[4812]: I1125 17:59:58.831602 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 17:59:58 crc kubenswrapper[4812]: E1125 17:59:58.832158 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share 
pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.154703 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn"] Nov 25 18:00:00 crc kubenswrapper[4812]: E1125 18:00:00.155211 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.155229 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4812]: E1125 18:00:00.155244 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="extract-utilities" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.155253 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="extract-utilities" Nov 25 18:00:00 crc kubenswrapper[4812]: E1125 18:00:00.155262 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="extract-content" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.155269 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="extract-content" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.155512 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9da3b4f-0914-49b5-9f8c-e5fee6848226" containerName="registry-server" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.156215 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.158644 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.158801 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.186902 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn"] Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.281462 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-config-volume\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.281564 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-secret-volume\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.281620 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jq4b\" (UniqueName: \"kubernetes.io/projected/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-kube-api-access-8jq4b\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.383527 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-config-volume\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.383628 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-secret-volume\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.383667 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jq4b\" (UniqueName: \"kubernetes.io/projected/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-kube-api-access-8jq4b\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.384820 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-config-volume\") pod 
\"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.959063 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-secret-volume\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:00 crc kubenswrapper[4812]: I1125 18:00:00.960095 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jq4b\" (UniqueName: \"kubernetes.io/projected/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-kube-api-access-8jq4b\") pod \"collect-profiles-29401560-xx7jn\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:01 crc kubenswrapper[4812]: I1125 18:00:01.087392 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:01 crc kubenswrapper[4812]: I1125 18:00:01.607693 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn"] Nov 25 18:00:01 crc kubenswrapper[4812]: W1125 18:00:01.612506 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cb6f57f_ead9_42b7_bf4f_5c7c899b6ce0.slice/crio-95f0a779b5833166fc126e0a2f4de6facac6e97f3e68d49cf84b095eaf1493e5 WatchSource:0}: Error finding container 95f0a779b5833166fc126e0a2f4de6facac6e97f3e68d49cf84b095eaf1493e5: Status 404 returned error can't find the container with id 95f0a779b5833166fc126e0a2f4de6facac6e97f3e68d49cf84b095eaf1493e5 Nov 25 18:00:01 crc kubenswrapper[4812]: I1125 18:00:01.744933 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" event={"ID":"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0","Type":"ContainerStarted","Data":"95f0a779b5833166fc126e0a2f4de6facac6e97f3e68d49cf84b095eaf1493e5"} Nov 25 18:00:02 crc kubenswrapper[4812]: I1125 18:00:02.754767 4812 generic.go:334] "Generic (PLEG): container finished" podID="4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" containerID="8977605ae5c32d0308e21837689d5e669c751bbc4370ec706d94bc8ce6f2cc75" exitCode=0 Nov 25 18:00:02 crc kubenswrapper[4812]: I1125 18:00:02.754866 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" event={"ID":"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0","Type":"ContainerDied","Data":"8977605ae5c32d0308e21837689d5e669c751bbc4370ec706d94bc8ce6f2cc75"} Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.197075 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.360090 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-secret-volume\") pod \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.360191 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-config-volume\") pod \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.360285 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jq4b\" (UniqueName: \"kubernetes.io/projected/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-kube-api-access-8jq4b\") pod \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\" (UID: \"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0\") " Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.360869 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-config-volume" (OuterVolumeSpecName: "config-volume") pod "4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" (UID: "4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.366680 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" (UID: "4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.367827 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-kube-api-access-8jq4b" (OuterVolumeSpecName: "kube-api-access-8jq4b") pod "4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" (UID: "4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0"). InnerVolumeSpecName "kube-api-access-8jq4b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.462002 4812 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.462032 4812 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.462042 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jq4b\" (UniqueName: \"kubernetes.io/projected/4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0-kube-api-access-8jq4b\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.774616 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" event={"ID":"4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0","Type":"ContainerDied","Data":"95f0a779b5833166fc126e0a2f4de6facac6e97f3e68d49cf84b095eaf1493e5"} Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.774653 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95f0a779b5833166fc126e0a2f4de6facac6e97f3e68d49cf84b095eaf1493e5" Nov 25 18:00:04 crc kubenswrapper[4812]: I1125 18:00:04.774706 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401560-xx7jn" Nov 25 18:00:05 crc kubenswrapper[4812]: I1125 18:00:05.287265 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb"] Nov 25 18:00:05 crc kubenswrapper[4812]: I1125 18:00:05.296337 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401515-lb6pb"] Nov 25 18:00:05 crc kubenswrapper[4812]: I1125 18:00:05.851523 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a83003d5-18a9-482d-8348-b4e3995a8db3" path="/var/lib/kubelet/pods/a83003d5-18a9-482d-8348-b4e3995a8db3/volumes" Nov 25 18:00:06 crc kubenswrapper[4812]: I1125 18:00:06.831467 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:00:06 crc kubenswrapper[4812]: E1125 18:00:06.832155 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:00:10 crc kubenswrapper[4812]: I1125 18:00:10.838427 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:00:10 crc kubenswrapper[4812]: E1125 18:00:10.839010 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:00:11 crc kubenswrapper[4812]: I1125 18:00:11.831891 4812 scope.go:117] "RemoveContainer" 
containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:00:11 crc kubenswrapper[4812]: E1125 18:00:11.832508 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.383604 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9htqb"] Nov 25 18:00:16 crc kubenswrapper[4812]: E1125 18:00:16.384914 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" containerName="collect-profiles" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.384929 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" containerName="collect-profiles" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.385147 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cb6f57f-ead9-42b7-bf4f-5c7c899b6ce0" containerName="collect-profiles" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.386521 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.399484 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9htqb"] Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.551697 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76mbd\" (UniqueName: \"kubernetes.io/projected/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-kube-api-access-76mbd\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.551815 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-catalog-content\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.552156 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-utilities\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.654719 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-utilities\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.654800 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76mbd\" (UniqueName: 
\"kubernetes.io/projected/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-kube-api-access-76mbd\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.654840 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-catalog-content\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.655741 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-utilities\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.655805 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-catalog-content\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.676959 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76mbd\" (UniqueName: \"kubernetes.io/projected/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-kube-api-access-76mbd\") pod \"certified-operators-9htqb\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:16 crc kubenswrapper[4812]: I1125 18:00:16.706075 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:17 crc kubenswrapper[4812]: I1125 18:00:17.422604 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9htqb"] Nov 25 18:00:17 crc kubenswrapper[4812]: I1125 18:00:17.952807 4812 generic.go:334] "Generic (PLEG): container finished" podID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerID="708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b" exitCode=0 Nov 25 18:00:17 crc kubenswrapper[4812]: I1125 18:00:17.952849 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9htqb" event={"ID":"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19","Type":"ContainerDied","Data":"708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b"} Nov 25 18:00:17 crc kubenswrapper[4812]: I1125 18:00:17.952873 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9htqb" event={"ID":"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19","Type":"ContainerStarted","Data":"f771b4300f211c3a7523476a6838be5a9ecaf6f5e223645e7f9f9ec3c0eeda66"} Nov 25 18:00:17 crc kubenswrapper[4812]: I1125 18:00:17.955327 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:00:19 crc kubenswrapper[4812]: I1125 18:00:19.972169 4812 generic.go:334] "Generic (PLEG): container finished" podID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerID="a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3" exitCode=0 Nov 25 18:00:19 crc kubenswrapper[4812]: I1125 18:00:19.973633 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9htqb" event={"ID":"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19","Type":"ContainerDied","Data":"a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3"} Nov 25 18:00:21 crc kubenswrapper[4812]: I1125 18:00:21.831577 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:00:21 crc kubenswrapper[4812]: E1125 18:00:21.832315 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:00:21 crc kubenswrapper[4812]: I1125 18:00:21.994036 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9htqb" event={"ID":"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19","Type":"ContainerStarted","Data":"7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d"} Nov 25 18:00:22 crc kubenswrapper[4812]: I1125 18:00:22.027669 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9htqb" podStartSLOduration=2.974021084 podStartE2EDuration="6.027647605s" podCreationTimestamp="2025-11-25 18:00:16 +0000 UTC" firstStartedPulling="2025-11-25 18:00:17.955121603 +0000 UTC m=+4392.795263698" lastFinishedPulling="2025-11-25 18:00:21.008748124 +0000 UTC m=+4395.848890219" observedRunningTime="2025-11-25 18:00:22.013114834 +0000 UTC m=+4396.853256969" watchObservedRunningTime="2025-11-25 18:00:22.027647605 +0000 UTC m=+4396.867789700" Nov 25 18:00:22 crc kubenswrapper[4812]: I1125 18:00:22.831386 4812 scope.go:117] "RemoveContainer" 
containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:00:22 crc kubenswrapper[4812]: E1125 18:00:22.832005 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:00:25 crc kubenswrapper[4812]: I1125 18:00:25.840197 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:00:25 crc kubenswrapper[4812]: E1125 18:00:25.840879 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:00:26 crc kubenswrapper[4812]: I1125 18:00:26.707054 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:26 crc kubenswrapper[4812]: I1125 18:00:26.707412 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:26 crc kubenswrapper[4812]: I1125 18:00:26.766222 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:27 crc kubenswrapper[4812]: I1125 18:00:27.111316 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:27 crc kubenswrapper[4812]: I1125 18:00:27.161655 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9htqb"] Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.065721 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9htqb" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="registry-server" containerID="cri-o://7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d" gracePeriod=2 Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.681626 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.781199 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-catalog-content\") pod \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.781351 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-utilities\") pod \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.781592 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76mbd\" (UniqueName: \"kubernetes.io/projected/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-kube-api-access-76mbd\") pod \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\" (UID: \"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19\") " Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.783390 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-utilities" (OuterVolumeSpecName: "utilities") pod "d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" (UID: "d694f5f3-c512-4a4f-a4b3-aa3fb7839c19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.790559 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-kube-api-access-76mbd" (OuterVolumeSpecName: "kube-api-access-76mbd") pod "d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" (UID: "d694f5f3-c512-4a4f-a4b3-aa3fb7839c19"). InnerVolumeSpecName "kube-api-access-76mbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.851188 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" (UID: "d694f5f3-c512-4a4f-a4b3-aa3fb7839c19"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.883401 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76mbd\" (UniqueName: \"kubernetes.io/projected/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-kube-api-access-76mbd\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.883435 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:29 crc kubenswrapper[4812]: I1125 18:00:29.883446 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.081762 4812 generic.go:334] "Generic (PLEG): container finished" podID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerID="7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d" exitCode=0 Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.081811 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9htqb" event={"ID":"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19","Type":"ContainerDied","Data":"7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d"} Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.081830 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9htqb" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.081852 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9htqb" event={"ID":"d694f5f3-c512-4a4f-a4b3-aa3fb7839c19","Type":"ContainerDied","Data":"f771b4300f211c3a7523476a6838be5a9ecaf6f5e223645e7f9f9ec3c0eeda66"} Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.081877 4812 scope.go:117] "RemoveContainer" containerID="7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.113468 4812 scope.go:117] "RemoveContainer" containerID="a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.129465 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9htqb"] Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.147201 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9htqb"] Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.161372 4812 scope.go:117] "RemoveContainer" containerID="708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.191884 4812 scope.go:117] "RemoveContainer" containerID="7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d" Nov 25 18:00:30 crc kubenswrapper[4812]: E1125 18:00:30.192453 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d\": container with ID starting with 7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d not found: ID does not exist" containerID="7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.192500 
4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d"} err="failed to get container status \"7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d\": rpc error: code = NotFound desc = could not find container \"7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d\": container with ID starting with 7bf8c857e212b667c29fba238bae8adddf1973ff5a0900581b4a64858662a71d not found: ID does not exist" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.192603 4812 scope.go:117] "RemoveContainer" containerID="a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3" Nov 25 18:00:30 crc kubenswrapper[4812]: E1125 18:00:30.193392 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3\": container with ID starting with a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3 not found: ID does not exist" containerID="a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.193425 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3"} err="failed to get container status \"a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3\": rpc error: code = NotFound desc = could not find container \"a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3\": container with ID starting with a4ce9e2edc7cdd5bcb50b37701f570604e43e0b580dfe0921b74ba724375a2f3 not found: ID does not exist" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.193443 4812 scope.go:117] "RemoveContainer" containerID="708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b" Nov 25 18:00:30 crc kubenswrapper[4812]: E1125 18:00:30.194194 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b\": container with ID starting with 708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b not found: ID does not exist" containerID="708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b" Nov 25 18:00:30 crc kubenswrapper[4812]: I1125 18:00:30.194248 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b"} err="failed to get container status \"708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b\": rpc error: code = NotFound desc = could not find container \"708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b\": container with ID starting with 708a5254b89de8a2e17f5d8a78395ad0570e8526d12dbd24999a622d0f64f55b not found: ID does not exist" Nov 25 18:00:31 crc kubenswrapper[4812]: I1125 18:00:31.843956 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" path="/var/lib/kubelet/pods/d694f5f3-c512-4a4f-a4b3-aa3fb7839c19/volumes" Nov 25 18:00:33 crc kubenswrapper[4812]: I1125 18:00:33.832393 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:00:33 crc kubenswrapper[4812]: E1125 18:00:33.833311 4812 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:00:35 crc kubenswrapper[4812]: I1125 18:00:35.841081 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:00:35 crc kubenswrapper[4812]: E1125 18:00:35.841925 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:00:38 crc kubenswrapper[4812]: I1125 18:00:38.832185 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:00:38 crc kubenswrapper[4812]: E1125 18:00:38.833869 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:00:46 crc kubenswrapper[4812]: I1125 18:00:46.832089 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:00:46 crc kubenswrapper[4812]: I1125 18:00:46.832643 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:00:46 crc kubenswrapper[4812]: E1125 18:00:46.832745 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:00:46 crc kubenswrapper[4812]: E1125 18:00:46.832930 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:00:53 crc kubenswrapper[4812]: I1125 18:00:53.832731 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:00:53 crc kubenswrapper[4812]: E1125 18:00:53.833840 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:00:59 crc kubenswrapper[4812]: I1125 18:00:59.832613 4812 scope.go:117] "RemoveContainer" 
containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:00:59 crc kubenswrapper[4812]: E1125 18:00:59.833703 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.189411 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401561-vrmz5"] Nov 25 18:01:00 crc kubenswrapper[4812]: E1125 18:01:00.189929 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="extract-content" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.189955 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="extract-content" Nov 25 18:01:00 crc kubenswrapper[4812]: E1125 18:01:00.189975 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="registry-server" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.189984 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="registry-server" Nov 25 18:01:00 crc kubenswrapper[4812]: E1125 18:01:00.190013 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="extract-utilities" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.190022 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="extract-utilities" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.190256 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="d694f5f3-c512-4a4f-a4b3-aa3fb7839c19" containerName="registry-server" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.191095 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.215880 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401561-vrmz5"] Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.353499 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl92c\" (UniqueName: \"kubernetes.io/projected/886ccfe7-797e-4238-a39a-d67b2cb91d8b-kube-api-access-gl92c\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.353752 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-config-data\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.353798 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-combined-ca-bundle\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.353890 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-fernet-keys\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.455591 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-config-data\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.455660 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-combined-ca-bundle\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.455710 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-fernet-keys\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.455916 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl92c\" (UniqueName: \"kubernetes.io/projected/886ccfe7-797e-4238-a39a-d67b2cb91d8b-kube-api-access-gl92c\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.463218 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-config-data\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.465345 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-combined-ca-bundle\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.468655 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-fernet-keys\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.477861 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl92c\" (UniqueName: \"kubernetes.io/projected/886ccfe7-797e-4238-a39a-d67b2cb91d8b-kube-api-access-gl92c\") pod \"keystone-cron-29401561-vrmz5\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:00 crc kubenswrapper[4812]: I1125 18:01:00.525862 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:01 crc kubenswrapper[4812]: I1125 18:01:01.017797 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401561-vrmz5"] Nov 25 18:01:01 crc kubenswrapper[4812]: W1125 18:01:01.028723 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod886ccfe7_797e_4238_a39a_d67b2cb91d8b.slice/crio-e40cd894e11fc8130fce76ac44637d2f377dc2bd6c0aebf1dbdd1f546b5998b9 WatchSource:0}: Error finding container e40cd894e11fc8130fce76ac44637d2f377dc2bd6c0aebf1dbdd1f546b5998b9: Status 404 returned error can't find the container with id e40cd894e11fc8130fce76ac44637d2f377dc2bd6c0aebf1dbdd1f546b5998b9 Nov 25 18:01:01 crc kubenswrapper[4812]: I1125 18:01:01.430840 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-vrmz5" event={"ID":"886ccfe7-797e-4238-a39a-d67b2cb91d8b","Type":"ContainerStarted","Data":"e3bf67f14d689f1198a47a3b174889deaed248b81ebfa46586006b40afe4b5d0"} Nov 25 18:01:01 crc kubenswrapper[4812]: I1125 18:01:01.430909 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-vrmz5" event={"ID":"886ccfe7-797e-4238-a39a-d67b2cb91d8b","Type":"ContainerStarted","Data":"e40cd894e11fc8130fce76ac44637d2f377dc2bd6c0aebf1dbdd1f546b5998b9"} Nov 25 18:01:01 crc kubenswrapper[4812]: I1125 18:01:01.456970 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401561-vrmz5" podStartSLOduration=1.456947391 podStartE2EDuration="1.456947391s" podCreationTimestamp="2025-11-25 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 18:01:01.451256389 +0000 UTC m=+4436.291398504" watchObservedRunningTime="2025-11-25 18:01:01.456947391 +0000 UTC m=+4436.297089516" Nov 25 18:01:01 crc kubenswrapper[4812]: I1125 18:01:01.832689 4812 
scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:01:01 crc kubenswrapper[4812]: E1125 18:01:01.833562 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:01:03 crc kubenswrapper[4812]: I1125 18:01:03.032300 4812 scope.go:117] "RemoveContainer" containerID="e85de82af017d7961d1942a62302bddb2fbc6a8a96d25f6ba11a7298968e5715" Nov 25 18:01:03 crc kubenswrapper[4812]: I1125 18:01:03.448900 4812 generic.go:334] "Generic (PLEG): container finished" podID="886ccfe7-797e-4238-a39a-d67b2cb91d8b" containerID="e3bf67f14d689f1198a47a3b174889deaed248b81ebfa46586006b40afe4b5d0" exitCode=0 Nov 25 18:01:03 crc kubenswrapper[4812]: I1125 18:01:03.448972 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-vrmz5" event={"ID":"886ccfe7-797e-4238-a39a-d67b2cb91d8b","Type":"ContainerDied","Data":"e3bf67f14d689f1198a47a3b174889deaed248b81ebfa46586006b40afe4b5d0"} Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.829073 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401561-vrmz5" Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.961989 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-fernet-keys\") pod \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.962082 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gl92c\" (UniqueName: \"kubernetes.io/projected/886ccfe7-797e-4238-a39a-d67b2cb91d8b-kube-api-access-gl92c\") pod \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.962124 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-combined-ca-bundle\") pod \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.962215 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-config-data\") pod \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\" (UID: \"886ccfe7-797e-4238-a39a-d67b2cb91d8b\") " Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.967755 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/886ccfe7-797e-4238-a39a-d67b2cb91d8b-kube-api-access-gl92c" (OuterVolumeSpecName: "kube-api-access-gl92c") pod "886ccfe7-797e-4238-a39a-d67b2cb91d8b" (UID: "886ccfe7-797e-4238-a39a-d67b2cb91d8b"). InnerVolumeSpecName "kube-api-access-gl92c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.967884 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "886ccfe7-797e-4238-a39a-d67b2cb91d8b" (UID: "886ccfe7-797e-4238-a39a-d67b2cb91d8b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:01:04 crc kubenswrapper[4812]: I1125 18:01:04.994147 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "886ccfe7-797e-4238-a39a-d67b2cb91d8b" (UID: "886ccfe7-797e-4238-a39a-d67b2cb91d8b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.009003 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-config-data" (OuterVolumeSpecName: "config-data") pod "886ccfe7-797e-4238-a39a-d67b2cb91d8b" (UID: "886ccfe7-797e-4238-a39a-d67b2cb91d8b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.064178 4812 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.064213 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl92c\" (UniqueName: \"kubernetes.io/projected/886ccfe7-797e-4238-a39a-d67b2cb91d8b-kube-api-access-gl92c\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.064223 4812 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.064234 4812 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/886ccfe7-797e-4238-a39a-d67b2cb91d8b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.473229 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401561-vrmz5" event={"ID":"886ccfe7-797e-4238-a39a-d67b2cb91d8b","Type":"ContainerDied","Data":"e40cd894e11fc8130fce76ac44637d2f377dc2bd6c0aebf1dbdd1f546b5998b9"} Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.473266 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e40cd894e11fc8130fce76ac44637d2f377dc2bd6c0aebf1dbdd1f546b5998b9" Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.473316 4812 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 18:01:05 crc kubenswrapper[4812]: I1125 18:01:05.473316 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401561-vrmz5"
Nov 25 18:01:08 crc kubenswrapper[4812]: I1125 18:01:08.832432 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:01:08 crc kubenswrapper[4812]: E1125 18:01:08.833498 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:01:11 crc kubenswrapper[4812]: I1125 18:01:11.831756 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:01:11 crc kubenswrapper[4812]: E1125 18:01:11.832377 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:01:16 crc kubenswrapper[4812]: I1125 18:01:16.833229 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:01:16 crc kubenswrapper[4812]: E1125 18:01:16.834865 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:01:20 crc kubenswrapper[4812]: I1125 18:01:20.831988 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:01:20 crc kubenswrapper[4812]: E1125 18:01:20.833051 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:01:23 crc kubenswrapper[4812]: I1125 18:01:23.831490 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:01:23 crc kubenswrapper[4812]: E1125 18:01:23.832220 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:01:30 crc kubenswrapper[4812]: I1125 18:01:30.833234 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:01:30 crc kubenswrapper[4812]: E1125 18:01:30.834118 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:01:34 crc kubenswrapper[4812]: I1125 18:01:34.831813 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:01:34 crc kubenswrapper[4812]: E1125 18:01:34.833968 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:01:35 crc kubenswrapper[4812]: I1125 18:01:35.841822 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:01:35 crc kubenswrapper[4812]: E1125 18:01:35.842357 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:01:41 crc kubenswrapper[4812]: I1125 18:01:41.832355 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:01:41 crc kubenswrapper[4812]: E1125 18:01:41.833159 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:01:49 crc kubenswrapper[4812]: I1125 18:01:49.832107 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:01:49 crc kubenswrapper[4812]: E1125 18:01:49.833020 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:01:50 crc kubenswrapper[4812]: I1125 18:01:50.863004 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:01:50 crc kubenswrapper[4812]: E1125 18:01:50.863908 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:01:53 crc kubenswrapper[4812]: I1125 18:01:53.831866 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:01:53 crc kubenswrapper[4812]: E1125 18:01:53.832705 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:02:01 crc kubenswrapper[4812]: I1125 18:02:01.831981 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:02:01 crc kubenswrapper[4812]: E1125 18:02:01.833302 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:02:04 crc kubenswrapper[4812]: I1125 18:02:04.832241 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:02:04 crc kubenswrapper[4812]: E1125 18:02:04.833810 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:02:08 crc kubenswrapper[4812]: I1125 18:02:08.831913 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:02:08 crc kubenswrapper[4812]: E1125 18:02:08.832777 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:02:14 crc kubenswrapper[4812]: I1125 18:02:14.831497 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:02:14 crc kubenswrapper[4812]: E1125 18:02:14.832389 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:02:19 crc kubenswrapper[4812]: I1125 18:02:19.832448 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:02:19 crc kubenswrapper[4812]: E1125 18:02:19.833525 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
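Every retry in the stretch above reports the same "back-off 5m0s" because these containers have failed enough consecutive restarts to reach the kubelet's back-off ceiling: the restart delay roughly doubles per failure until it is clamped at five minutes. A minimal sketch of that capped doubling; the 10-second base is an assumption here (it matches the kubelet's historical default), while the 5m cap is what the log itself shows:

    package main

    import (
    	"fmt"
    	"time"
    )

    // crashLoopDelay doubles the restart delay per consecutive failure and
    // clamps it at five minutes, matching the "back-off 5m0s" entries above
    // once a container has failed enough times in a row.
    func crashLoopDelay(consecutiveFailures int) time.Duration {
    	const ceiling = 5 * time.Minute
    	d := 10 * time.Second // assumed base delay
    	for i := 0; i < consecutiveFailures; i++ {
    		d *= 2
    		if d >= ceiling {
    			return ceiling
    		}
    	}
    	return d
    }

    func main() {
    	// Prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s, ...
    	for n := 0; n <= 6; n++ {
    		fmt.Printf("failure %d -> wait %s\n", n, crashLoopDelay(n))
    	}
    }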
containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:02:20 crc kubenswrapper[4812]: E1125 18:02:20.833159 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:02:26 crc kubenswrapper[4812]: I1125 18:02:26.832335 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:02:26 crc kubenswrapper[4812]: E1125 18:02:26.833384 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:02:31 crc kubenswrapper[4812]: I1125 18:02:31.834057 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:02:31 crc kubenswrapper[4812]: E1125 18:02:31.835256 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:02:33 crc kubenswrapper[4812]: I1125 18:02:33.832449 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:02:33 crc kubenswrapper[4812]: E1125 18:02:33.833112 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:02:41 crc kubenswrapper[4812]: I1125 18:02:41.832229 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:02:41 crc kubenswrapper[4812]: E1125 18:02:41.833088 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:02:44 crc kubenswrapper[4812]: I1125 18:02:44.832125 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:02:44 crc kubenswrapper[4812]: E1125 18:02:44.832874 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" 
podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:02:46 crc kubenswrapper[4812]: I1125 18:02:46.832465 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:02:46 crc kubenswrapper[4812]: E1125 18:02:46.833691 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:02:55 crc kubenswrapper[4812]: I1125 18:02:55.844176 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:02:55 crc kubenswrapper[4812]: E1125 18:02:55.845452 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:02:56 crc kubenswrapper[4812]: I1125 18:02:56.831832 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:02:56 crc kubenswrapper[4812]: E1125 18:02:56.832294 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:02:58 crc kubenswrapper[4812]: I1125 18:02:58.832251 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:02:58 crc kubenswrapper[4812]: E1125 18:02:58.834211 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:03:08 crc kubenswrapper[4812]: I1125 18:03:08.831638 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:03:08 crc kubenswrapper[4812]: E1125 18:03:08.832717 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:03:10 crc kubenswrapper[4812]: I1125 18:03:10.832734 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:03:10 crc kubenswrapper[4812]: E1125 18:03:10.834001 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" 
pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:03:11 crc kubenswrapper[4812]: I1125 18:03:11.832595 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:03:11 crc kubenswrapper[4812]: E1125 18:03:11.833407 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:03:20 crc kubenswrapper[4812]: I1125 18:03:20.832077 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:03:20 crc kubenswrapper[4812]: E1125 18:03:20.832989 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:03:24 crc kubenswrapper[4812]: I1125 18:03:24.831496 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:03:24 crc kubenswrapper[4812]: I1125 18:03:24.832168 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:03:24 crc kubenswrapper[4812]: E1125 18:03:24.832228 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:03:24 crc kubenswrapper[4812]: E1125 18:03:24.832708 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:03:32 crc kubenswrapper[4812]: I1125 18:03:32.831858 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606" Nov 25 18:03:32 crc kubenswrapper[4812]: E1125 18:03:32.832844 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:03:37 crc kubenswrapper[4812]: I1125 18:03:37.832100 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:03:37 crc kubenswrapper[4812]: I1125 18:03:37.833194 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:03:37 
Nov 25 18:03:37 crc kubenswrapper[4812]: E1125 18:03:37.833727 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:03:37 crc kubenswrapper[4812]: E1125 18:03:37.833847 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:03:43 crc kubenswrapper[4812]: I1125 18:03:43.832376 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:03:43 crc kubenswrapper[4812]: E1125 18:03:43.833158 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:03:48 crc kubenswrapper[4812]: I1125 18:03:48.831271 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:03:48 crc kubenswrapper[4812]: E1125 18:03:48.831913 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:03:52 crc kubenswrapper[4812]: I1125 18:03:52.831319 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:03:52 crc kubenswrapper[4812]: E1125 18:03:52.831939 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:03:57 crc kubenswrapper[4812]: I1125 18:03:57.833106 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:03:59 crc kubenswrapper[4812]: I1125 18:03:59.286192 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b"}
Nov 25 18:04:00 crc kubenswrapper[4812]: I1125 18:04:00.297702 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" exitCode=1
Nov 25 18:04:00 crc kubenswrapper[4812]: I1125 18:04:00.297814 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b"}
Nov 25 18:04:00 crc kubenswrapper[4812]: I1125 18:04:00.298206 4812 scope.go:117] "RemoveContainer" containerID="2a103b64182154148e97ac3abbc19ffde76c2a4b0dd45a7bac2798fe5ec6f606"
Nov 25 18:04:00 crc kubenswrapper[4812]: I1125 18:04:00.299252 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b"
Nov 25 18:04:00 crc kubenswrapper[4812]: E1125 18:04:00.299570 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:04:02 crc kubenswrapper[4812]: I1125 18:04:02.832719 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:04:02 crc kubenswrapper[4812]: E1125 18:04:02.833836 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:04:05 crc kubenswrapper[4812]: I1125 18:04:05.164220 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 18:04:05 crc kubenswrapper[4812]: I1125 18:04:05.165329 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b"
Nov 25 18:04:05 crc kubenswrapper[4812]: E1125 18:04:05.165781 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:04:05 crc kubenswrapper[4812]: I1125 18:04:05.166450 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 18:04:05 crc kubenswrapper[4812]: I1125 18:04:05.166567 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 25 18:04:05 crc kubenswrapper[4812]: I1125 18:04:05.355383 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b"
Nov 25 18:04:05 crc kubenswrapper[4812]: E1125 18:04:05.355888 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:04:06 crc kubenswrapper[4812]: I1125 18:04:06.831767 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:04:06 crc kubenswrapper[4812]: E1125 18:04:06.832278 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:04:17 crc kubenswrapper[4812]: I1125 18:04:17.832781 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:04:17 crc kubenswrapper[4812]: E1125 18:04:17.833485 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
Nov 25 18:04:18 crc kubenswrapper[4812]: I1125 18:04:18.832809 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:04:18 crc kubenswrapper[4812]: E1125 18:04:18.833401 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:04:20 crc kubenswrapper[4812]: I1125 18:04:20.832068 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b"
Nov 25 18:04:20 crc kubenswrapper[4812]: E1125 18:04:20.832395 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531"
Nov 25 18:04:30 crc kubenswrapper[4812]: I1125 18:04:30.832074 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9"
Nov 25 18:04:30 crc kubenswrapper[4812]: I1125 18:04:30.832769 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4"
Nov 25 18:04:30 crc kubenswrapper[4812]: E1125 18:04:30.832998 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069"
Nov 25 18:04:30 crc kubenswrapper[4812]: E1125 18:04:30.833372 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29"
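The "Generic (PLEG): container finished" and "SyncLoop (PLEG): event for pod" entries in the stretch above come from the pod lifecycle event generator: it periodically relists containers from the runtime and diffs the new snapshot against the previous one, emitting one lifecycle event per observed transition (the ecc01841... container appears exactly once as ContainerStarted and once as ContainerDied). A minimal sketch of that relist diff, with simplified state strings and event names rather than the kubelet's actual types:

    package main

    import "fmt"

    type containerEvent struct {
    	ID, Type string
    }

    // relist compares the previous and current runtime snapshots
    // (containerID -> state) and emits lifecycle events, the idea behind
    // the ContainerStarted/ContainerDied entries above.
    func relist(prev, cur map[string]string) []containerEvent {
    	var events []containerEvent
    	for id, state := range cur {
    		if state == "running" && prev[id] != "running" {
    			events = append(events, containerEvent{id, "ContainerStarted"})
    		}
    		if state == "exited" && prev[id] == "running" {
    			events = append(events, containerEvent{id, "ContainerDied"})
    		}
    	}
    	return events
    }

    func main() {
    	prev := map[string]string{"ecc01841": "running"}
    	cur := map[string]string{"ecc01841": "exited"}
    	fmt.Println(relist(prev, cur)) // [{ecc01841 ContainerDied}]
    }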
"RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:04:31 crc kubenswrapper[4812]: E1125 18:04:31.833742 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:04:44 crc kubenswrapper[4812]: I1125 18:04:44.831470 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:04:44 crc kubenswrapper[4812]: I1125 18:04:44.832036 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:04:44 crc kubenswrapper[4812]: I1125 18:04:44.832084 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:04:44 crc kubenswrapper[4812]: E1125 18:04:44.832229 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:04:44 crc kubenswrapper[4812]: E1125 18:04:44.832296 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:04:44 crc kubenswrapper[4812]: E1125 18:04:44.832581 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:04:51 crc kubenswrapper[4812]: I1125 18:04:51.900777 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5qzjk"] Nov 25 18:04:51 crc kubenswrapper[4812]: E1125 18:04:51.906872 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="886ccfe7-797e-4238-a39a-d67b2cb91d8b" containerName="keystone-cron" Nov 25 18:04:51 crc kubenswrapper[4812]: I1125 18:04:51.906901 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="886ccfe7-797e-4238-a39a-d67b2cb91d8b" containerName="keystone-cron" Nov 25 18:04:51 crc kubenswrapper[4812]: I1125 18:04:51.911158 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="886ccfe7-797e-4238-a39a-d67b2cb91d8b" containerName="keystone-cron" Nov 25 18:04:51 crc kubenswrapper[4812]: I1125 18:04:51.918866 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:51 crc kubenswrapper[4812]: I1125 18:04:51.966588 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5qzjk"] Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.023278 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-utilities\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.023577 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkxwh\" (UniqueName: \"kubernetes.io/projected/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-kube-api-access-nkxwh\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.023720 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-catalog-content\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.125782 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-utilities\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.125943 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkxwh\" (UniqueName: \"kubernetes.io/projected/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-kube-api-access-nkxwh\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.126039 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-catalog-content\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.126286 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-utilities\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.126586 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-catalog-content\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.149154 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
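The kube-api-access-* volumes that keep appearing in these entries (kube-api-access-76mbd, -gl92c, and here -nkxwh) are projected volumes that surface the pod's service-account credentials at a well-known path inside the container. A minimal sketch of a workload reading that path; the path and file names are the standard Kubernetes convention, and the program simply reports an error when run off-cluster:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // The kube-api-access-* projected volumes mounted above surface the
    // service-account credentials at this well-known in-pod path.
    const saDir = "/var/run/secrets/kubernetes.io/serviceaccount"

    func main() {
    	for _, f := range []string{"token", "ca.crt", "namespace"} {
    		b, err := os.ReadFile(filepath.Join(saDir, f))
    		if err != nil {
    			fmt.Println(f, "not available:", err) // e.g. when run off-cluster
    			continue
    		}
    		fmt.Printf("%s: %d bytes\n", f, len(b))
    	}
    }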
\"kube-api-access-nkxwh\" (UniqueName: \"kubernetes.io/projected/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-kube-api-access-nkxwh\") pod \"redhat-operators-5qzjk\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.270346 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.762039 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5qzjk"] Nov 25 18:04:52 crc kubenswrapper[4812]: W1125 18:04:52.765103 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89c81428_b2ee_4ffd_8386_e35f6f08e5fb.slice/crio-cc0dd761f03404dddb2db7fa3c8df9bec8e1a6fc561b775b0351d6a762d8c168 WatchSource:0}: Error finding container cc0dd761f03404dddb2db7fa3c8df9bec8e1a6fc561b775b0351d6a762d8c168: Status 404 returned error can't find the container with id cc0dd761f03404dddb2db7fa3c8df9bec8e1a6fc561b775b0351d6a762d8c168 Nov 25 18:04:52 crc kubenswrapper[4812]: I1125 18:04:52.817363 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerStarted","Data":"cc0dd761f03404dddb2db7fa3c8df9bec8e1a6fc561b775b0351d6a762d8c168"} Nov 25 18:04:53 crc kubenswrapper[4812]: I1125 18:04:53.831561 4812 generic.go:334] "Generic (PLEG): container finished" podID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerID="7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309" exitCode=0 Nov 25 18:04:53 crc kubenswrapper[4812]: I1125 18:04:53.843187 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerDied","Data":"7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309"} Nov 25 18:04:54 crc kubenswrapper[4812]: I1125 18:04:54.842051 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerStarted","Data":"7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312"} Nov 25 18:04:55 crc kubenswrapper[4812]: I1125 18:04:55.848114 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:04:55 crc kubenswrapper[4812]: E1125 18:04:55.848608 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:04:55 crc kubenswrapper[4812]: I1125 18:04:55.856102 4812 generic.go:334] "Generic (PLEG): container finished" podID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerID="7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312" exitCode=0 Nov 25 18:04:55 crc kubenswrapper[4812]: I1125 18:04:55.856156 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerDied","Data":"7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312"} Nov 25 
18:04:56 crc kubenswrapper[4812]: I1125 18:04:56.832279 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:04:56 crc kubenswrapper[4812]: E1125 18:04:56.833368 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lcgpx_openshift-machine-config-operator(8ed911cf-2139-4b12-84ba-af635585ba29)\"" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" Nov 25 18:04:56 crc kubenswrapper[4812]: I1125 18:04:56.869256 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerStarted","Data":"ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf"} Nov 25 18:04:56 crc kubenswrapper[4812]: I1125 18:04:56.901448 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5qzjk" podStartSLOduration=3.471268373 podStartE2EDuration="5.901422426s" podCreationTimestamp="2025-11-25 18:04:51 +0000 UTC" firstStartedPulling="2025-11-25 18:04:53.837313563 +0000 UTC m=+4668.677455668" lastFinishedPulling="2025-11-25 18:04:56.267467616 +0000 UTC m=+4671.107609721" observedRunningTime="2025-11-25 18:04:56.8881749 +0000 UTC m=+4671.728317015" watchObservedRunningTime="2025-11-25 18:04:56.901422426 +0000 UTC m=+4671.741564551" Nov 25 18:04:58 crc kubenswrapper[4812]: I1125 18:04:58.832003 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:04:59 crc kubenswrapper[4812]: I1125 18:04:59.904163 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"3c2116323c7b46b277a1dfb91c32043c4f95b03654aa19daa180f605a9ebbb98"} Nov 25 18:04:59 crc kubenswrapper[4812]: I1125 18:04:59.904552 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 18:05:02 crc kubenswrapper[4812]: I1125 18:05:02.271450 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:05:02 crc kubenswrapper[4812]: I1125 18:05:02.271814 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:05:02 crc kubenswrapper[4812]: I1125 18:05:02.322273 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:05:02 crc kubenswrapper[4812]: I1125 18:05:02.976495 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:05:03 crc kubenswrapper[4812]: I1125 18:05:03.035643 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5qzjk"] Nov 25 18:05:04 crc kubenswrapper[4812]: I1125 18:05:04.957891 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5qzjk" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="registry-server" containerID="cri-o://ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf" gracePeriod=2 Nov 25 
18:05:04 crc kubenswrapper[4812]: I1125 18:05:04.970216 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tl5cs"] Nov 25 18:05:04 crc kubenswrapper[4812]: I1125 18:05:04.972546 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:04 crc kubenswrapper[4812]: I1125 18:05:04.982710 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl5cs"] Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.106077 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-utilities\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.106578 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-catalog-content\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.106617 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pj88\" (UniqueName: \"kubernetes.io/projected/efd92a41-34d1-485e-b58a-02886bf88032-kube-api-access-7pj88\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.208814 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-utilities\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.208909 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-catalog-content\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.208944 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pj88\" (UniqueName: \"kubernetes.io/projected/efd92a41-34d1-485e-b58a-02886bf88032-kube-api-access-7pj88\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.209992 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-utilities\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.210428 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-catalog-content\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.230141 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pj88\" (UniqueName: \"kubernetes.io/projected/efd92a41-34d1-485e-b58a-02886bf88032-kube-api-access-7pj88\") pod \"redhat-marketplace-tl5cs\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.292326 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.470190 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.633033 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkxwh\" (UniqueName: \"kubernetes.io/projected/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-kube-api-access-nkxwh\") pod \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.633121 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-catalog-content\") pod \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.633292 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-utilities\") pod \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\" (UID: \"89c81428-b2ee-4ffd-8386-e35f6f08e5fb\") " Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.634102 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-utilities" (OuterVolumeSpecName: "utilities") pod "89c81428-b2ee-4ffd-8386-e35f6f08e5fb" (UID: "89c81428-b2ee-4ffd-8386-e35f6f08e5fb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.637840 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-kube-api-access-nkxwh" (OuterVolumeSpecName: "kube-api-access-nkxwh") pod "89c81428-b2ee-4ffd-8386-e35f6f08e5fb" (UID: "89c81428-b2ee-4ffd-8386-e35f6f08e5fb"). InnerVolumeSpecName "kube-api-access-nkxwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.735603 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkxwh\" (UniqueName: \"kubernetes.io/projected/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-kube-api-access-nkxwh\") on node \"crc\" DevicePath \"\"" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.735649 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.782323 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl5cs"] Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.964782 4812 generic.go:334] "Generic (PLEG): container finished" podID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerID="ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf" exitCode=0 Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.964849 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerDied","Data":"ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf"} Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.964877 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qzjk" event={"ID":"89c81428-b2ee-4ffd-8386-e35f6f08e5fb","Type":"ContainerDied","Data":"cc0dd761f03404dddb2db7fa3c8df9bec8e1a6fc561b775b0351d6a762d8c168"} Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.964896 4812 scope.go:117] "RemoveContainer" containerID="ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.965054 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5qzjk" Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.970111 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl5cs" event={"ID":"efd92a41-34d1-485e-b58a-02886bf88032","Type":"ContainerStarted","Data":"0134fbdf63a6358a1b2f995f8f121d7f6529ff7415f0b9e1f6f4742fe0a27ce9"} Nov 25 18:05:05 crc kubenswrapper[4812]: I1125 18:05:05.994414 4812 scope.go:117] "RemoveContainer" containerID="7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.016635 4812 scope.go:117] "RemoveContainer" containerID="7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.039715 4812 scope.go:117] "RemoveContainer" containerID="ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf" Nov 25 18:05:06 crc kubenswrapper[4812]: E1125 18:05:06.040614 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf\": container with ID starting with ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf not found: ID does not exist" containerID="ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.040655 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf"} err="failed to get container status \"ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf\": rpc error: code = NotFound desc = could not find container \"ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf\": container with ID starting with ced4e1e30f57b024f46e490c6fb99cad87236240ae687cf2bc16bd138f22e1bf not found: ID does not exist" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.040676 4812 scope.go:117] "RemoveContainer" containerID="7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312" Nov 25 18:05:06 crc kubenswrapper[4812]: E1125 18:05:06.040988 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312\": container with ID starting with 7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312 not found: ID does not exist" containerID="7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.041031 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312"} err="failed to get container status \"7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312\": rpc error: code = NotFound desc = could not find container \"7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312\": container with ID starting with 7e7827f3f730d819b86e592cd8013a876f4e5bda3a663502e2120dc944278312 not found: ID does not exist" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.041045 4812 scope.go:117] "RemoveContainer" containerID="7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309" Nov 25 18:05:06 crc kubenswrapper[4812]: E1125 18:05:06.041373 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309\": container with ID starting with 7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309 not found: ID does not exist" containerID="7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.041395 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309"} err="failed to get container status \"7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309\": rpc error: code = NotFound desc = could not find container \"7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309\": container with ID starting with 7475aaf690475ad576407dfedd7d5ab3f8312d25ed002d241065ed3878d40309 not found: ID does not exist" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.166893 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89c81428-b2ee-4ffd-8386-e35f6f08e5fb" (UID: "89c81428-b2ee-4ffd-8386-e35f6f08e5fb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.246469 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89c81428-b2ee-4ffd-8386-e35f6f08e5fb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.304788 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5qzjk"] Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.315875 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5qzjk"] Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.985628 4812 generic.go:334] "Generic (PLEG): container finished" podID="efd92a41-34d1-485e-b58a-02886bf88032" containerID="aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9" exitCode=0 Nov 25 18:05:06 crc kubenswrapper[4812]: I1125 18:05:06.985709 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl5cs" event={"ID":"efd92a41-34d1-485e-b58a-02886bf88032","Type":"ContainerDied","Data":"aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9"} Nov 25 18:05:07 crc kubenswrapper[4812]: I1125 18:05:07.850934 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" path="/var/lib/kubelet/pods/89c81428-b2ee-4ffd-8386-e35f6f08e5fb/volumes" Nov 25 18:05:08 crc kubenswrapper[4812]: I1125 18:05:08.000962 4812 generic.go:334] "Generic (PLEG): container finished" podID="efd92a41-34d1-485e-b58a-02886bf88032" containerID="69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92" exitCode=0 Nov 25 18:05:08 crc kubenswrapper[4812]: I1125 18:05:08.001004 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl5cs" event={"ID":"efd92a41-34d1-485e-b58a-02886bf88032","Type":"ContainerDied","Data":"69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92"} Nov 25 18:05:09 crc kubenswrapper[4812]: I1125 18:05:09.013133 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl5cs" 
event={"ID":"efd92a41-34d1-485e-b58a-02886bf88032","Type":"ContainerStarted","Data":"eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568"} Nov 25 18:05:09 crc kubenswrapper[4812]: I1125 18:05:09.045004 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tl5cs" podStartSLOduration=3.6431671960000003 podStartE2EDuration="5.044984123s" podCreationTimestamp="2025-11-25 18:05:04 +0000 UTC" firstStartedPulling="2025-11-25 18:05:06.989863615 +0000 UTC m=+4681.830005710" lastFinishedPulling="2025-11-25 18:05:08.391680542 +0000 UTC m=+4683.231822637" observedRunningTime="2025-11-25 18:05:09.034739157 +0000 UTC m=+4683.874881272" watchObservedRunningTime="2025-11-25 18:05:09.044984123 +0000 UTC m=+4683.885126228" Nov 25 18:05:10 crc kubenswrapper[4812]: I1125 18:05:10.832065 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:05:10 crc kubenswrapper[4812]: I1125 18:05:10.832382 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:05:10 crc kubenswrapper[4812]: E1125 18:05:10.832551 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:05:11 crc kubenswrapper[4812]: I1125 18:05:11.040205 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"1ac1e241c603a13ec9553ce63660deb35c2c2405ad96fbe133a2b2439aa7001e"} Nov 25 18:05:11 crc kubenswrapper[4812]: I1125 18:05:11.296111 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:11 crc kubenswrapper[4812]: I1125 18:05:11.349272 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:15 crc kubenswrapper[4812]: I1125 18:05:15.294590 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:15 crc kubenswrapper[4812]: I1125 18:05:15.295003 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:15 crc kubenswrapper[4812]: I1125 18:05:15.347583 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:16 crc kubenswrapper[4812]: I1125 18:05:16.156754 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:16 crc kubenswrapper[4812]: I1125 18:05:16.214319 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl5cs"] Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.122293 4812 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-marketplace-tl5cs" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="registry-server" containerID="cri-o://eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568" gracePeriod=2 Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.558059 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.726019 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pj88\" (UniqueName: \"kubernetes.io/projected/efd92a41-34d1-485e-b58a-02886bf88032-kube-api-access-7pj88\") pod \"efd92a41-34d1-485e-b58a-02886bf88032\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.726088 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-utilities\") pod \"efd92a41-34d1-485e-b58a-02886bf88032\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.726234 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-catalog-content\") pod \"efd92a41-34d1-485e-b58a-02886bf88032\" (UID: \"efd92a41-34d1-485e-b58a-02886bf88032\") " Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.727301 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-utilities" (OuterVolumeSpecName: "utilities") pod "efd92a41-34d1-485e-b58a-02886bf88032" (UID: "efd92a41-34d1-485e-b58a-02886bf88032"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.732046 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efd92a41-34d1-485e-b58a-02886bf88032-kube-api-access-7pj88" (OuterVolumeSpecName: "kube-api-access-7pj88") pod "efd92a41-34d1-485e-b58a-02886bf88032" (UID: "efd92a41-34d1-485e-b58a-02886bf88032"). InnerVolumeSpecName "kube-api-access-7pj88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.762867 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "efd92a41-34d1-485e-b58a-02886bf88032" (UID: "efd92a41-34d1-485e-b58a-02886bf88032"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.828582 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.828613 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pj88\" (UniqueName: \"kubernetes.io/projected/efd92a41-34d1-485e-b58a-02886bf88032-kube-api-access-7pj88\") on node \"crc\" DevicePath \"\"" Nov 25 18:05:18 crc kubenswrapper[4812]: I1125 18:05:18.828626 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efd92a41-34d1-485e-b58a-02886bf88032-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.135567 4812 generic.go:334] "Generic (PLEG): container finished" podID="efd92a41-34d1-485e-b58a-02886bf88032" containerID="eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568" exitCode=0 Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.135625 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tl5cs" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.135669 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl5cs" event={"ID":"efd92a41-34d1-485e-b58a-02886bf88032","Type":"ContainerDied","Data":"eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568"} Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.136141 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tl5cs" event={"ID":"efd92a41-34d1-485e-b58a-02886bf88032","Type":"ContainerDied","Data":"0134fbdf63a6358a1b2f995f8f121d7f6529ff7415f0b9e1f6f4742fe0a27ce9"} Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.136176 4812 scope.go:117] "RemoveContainer" containerID="eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.191833 4812 scope.go:117] "RemoveContainer" containerID="69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.195027 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl5cs"] Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.204859 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tl5cs"] Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.238434 4812 scope.go:117] "RemoveContainer" containerID="aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.282398 4812 scope.go:117] "RemoveContainer" containerID="eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568" Nov 25 18:05:19 crc kubenswrapper[4812]: E1125 18:05:19.283163 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568\": container with ID starting with eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568 not found: ID does not exist" containerID="eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.283217 4812 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568"} err="failed to get container status \"eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568\": rpc error: code = NotFound desc = could not find container \"eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568\": container with ID starting with eb624ac1b15c25095d3077262f939a4af9b71637b27e1db953943295a79e4568 not found: ID does not exist" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.283250 4812 scope.go:117] "RemoveContainer" containerID="69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92" Nov 25 18:05:19 crc kubenswrapper[4812]: E1125 18:05:19.283904 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92\": container with ID starting with 69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92 not found: ID does not exist" containerID="69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.283968 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92"} err="failed to get container status \"69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92\": rpc error: code = NotFound desc = could not find container \"69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92\": container with ID starting with 69484863fd67c6dacca63301275034007eea5f3f923709e3c6070b137bad1a92 not found: ID does not exist" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.284042 4812 scope.go:117] "RemoveContainer" containerID="aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9" Nov 25 18:05:19 crc kubenswrapper[4812]: E1125 18:05:19.284450 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9\": container with ID starting with aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9 not found: ID does not exist" containerID="aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.284486 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9"} err="failed to get container status \"aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9\": rpc error: code = NotFound desc = could not find container \"aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9\": container with ID starting with aab1e5698c61610aca8b7a108c7aab74e6e7bb4e799d571965f926134ee0cbc9 not found: ID does not exist" Nov 25 18:05:19 crc kubenswrapper[4812]: I1125 18:05:19.847666 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efd92a41-34d1-485e-b58a-02886bf88032" path="/var/lib/kubelet/pods/efd92a41-34d1-485e-b58a-02886bf88032/volumes" Nov 25 18:05:21 crc kubenswrapper[4812]: I1125 18:05:21.067881 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 
18:05:21 crc kubenswrapper[4812]: I1125 18:05:21.363913 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:21 crc kubenswrapper[4812]: I1125 18:05:21.832578 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:05:21 crc kubenswrapper[4812]: E1125 18:05:21.833049 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:05:30 crc kubenswrapper[4812]: I1125 18:05:30.197122 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:30 crc kubenswrapper[4812]: I1125 18:05:30.199766 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:30 crc kubenswrapper[4812]: I1125 18:05:30.199826 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 18:05:30 crc kubenswrapper[4812]: I1125 18:05:30.200815 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"3c2116323c7b46b277a1dfb91c32043c4f95b03654aa19daa180f605a9ebbb98"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 18:05:30 crc kubenswrapper[4812]: I1125 18:05:30.200868 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://3c2116323c7b46b277a1dfb91c32043c4f95b03654aa19daa180f605a9ebbb98" gracePeriod=30 Nov 25 18:05:30 crc kubenswrapper[4812]: I1125 18:05:30.209419 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 18:05:33 crc kubenswrapper[4812]: I1125 18:05:33.833467 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:05:33 crc kubenswrapper[4812]: E1125 18:05:33.835240 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:05:34 crc kubenswrapper[4812]: I1125 18:05:34.321063 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="3c2116323c7b46b277a1dfb91c32043c4f95b03654aa19daa180f605a9ebbb98" exitCode=0 Nov 25 18:05:34 crc kubenswrapper[4812]: I1125 
18:05:34.321122 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"3c2116323c7b46b277a1dfb91c32043c4f95b03654aa19daa180f605a9ebbb98"} Nov 25 18:05:34 crc kubenswrapper[4812]: I1125 18:05:34.321169 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerStarted","Data":"486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8"} Nov 25 18:05:34 crc kubenswrapper[4812]: I1125 18:05:34.321188 4812 scope.go:117] "RemoveContainer" containerID="07e88d323b14a7c558d194e2f38cb6d35fc84867a6ccc69f5e8941fc613662e9" Nov 25 18:05:34 crc kubenswrapper[4812]: I1125 18:05:34.321323 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 25 18:05:46 crc kubenswrapper[4812]: I1125 18:05:46.832102 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:05:46 crc kubenswrapper[4812]: E1125 18:05:46.833144 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:05:51 crc kubenswrapper[4812]: I1125 18:05:51.293748 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:51 crc kubenswrapper[4812]: I1125 18:05:51.307186 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:05:57 crc kubenswrapper[4812]: I1125 18:05:57.831866 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:05:57 crc kubenswrapper[4812]: E1125 18:05:57.832656 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:06:01 crc kubenswrapper[4812]: I1125 18:06:01.114061 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:06:01 crc kubenswrapper[4812]: I1125 18:06:01.147574 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:06:10 crc kubenswrapper[4812]: I1125 18:06:10.194728 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:06:10 crc 
kubenswrapper[4812]: I1125 18:06:10.194747 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 18:06:10 crc kubenswrapper[4812]: I1125 18:06:10.195622 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-api-0" Nov 25 18:06:10 crc kubenswrapper[4812]: I1125 18:06:10.196718 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manila-api" containerStatusID={"Type":"cri-o","ID":"486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8"} pod="openstack/manila-api-0" containerMessage="Container manila-api failed liveness probe, will be restarted" Nov 25 18:06:10 crc kubenswrapper[4812]: I1125 18:06:10.196802 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" containerID="cri-o://486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" gracePeriod=30 Nov 25 18:06:10 crc kubenswrapper[4812]: I1125 18:06:10.204718 4812 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerName="manila-api" probeResult="failure" output="Get \"https://10.217.1.1:8786/healthcheck\": EOF" Nov 25 18:06:12 crc kubenswrapper[4812]: I1125 18:06:12.831041 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:06:12 crc kubenswrapper[4812]: E1125 18:06:12.831574 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:06:13 crc kubenswrapper[4812]: E1125 18:06:13.433421 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:06:13 crc kubenswrapper[4812]: I1125 18:06:13.818885 4812 generic.go:334] "Generic (PLEG): container finished" podID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" exitCode=0 Nov 25 18:06:13 crc kubenswrapper[4812]: I1125 18:06:13.818988 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"6d2f6681-d1d6-45e9-b0f4-65209caf0069","Type":"ContainerDied","Data":"486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8"} Nov 25 18:06:13 crc kubenswrapper[4812]: I1125 18:06:13.819071 4812 scope.go:117] "RemoveContainer" containerID="3c2116323c7b46b277a1dfb91c32043c4f95b03654aa19daa180f605a9ebbb98" Nov 25 18:06:13 crc kubenswrapper[4812]: I1125 18:06:13.820292 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:06:13 crc kubenswrapper[4812]: E1125 18:06:13.820777 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:06:24 crc kubenswrapper[4812]: I1125 18:06:24.831568 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:06:24 crc kubenswrapper[4812]: I1125 18:06:24.832213 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:06:24 crc kubenswrapper[4812]: E1125 18:06:24.832359 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:06:24 crc kubenswrapper[4812]: E1125 18:06:24.832491 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:06:35 crc kubenswrapper[4812]: I1125 18:06:35.844107 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:06:35 crc kubenswrapper[4812]: E1125 18:06:35.845331 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:06:37 crc kubenswrapper[4812]: I1125 18:06:37.833811 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:06:37 crc kubenswrapper[4812]: E1125 18:06:37.835029 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:06:47 crc kubenswrapper[4812]: I1125 18:06:47.832093 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:06:47 crc kubenswrapper[4812]: E1125 18:06:47.832937 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:06:51 crc kubenswrapper[4812]: I1125 18:06:51.831678 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:06:51 crc kubenswrapper[4812]: E1125 18:06:51.832649 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api 
pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:07:01 crc kubenswrapper[4812]: I1125 18:07:01.832281 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:07:01 crc kubenswrapper[4812]: E1125 18:07:01.833070 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:07:03 crc kubenswrapper[4812]: I1125 18:07:03.832260 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:07:03 crc kubenswrapper[4812]: E1125 18:07:03.832979 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:07:13 crc kubenswrapper[4812]: I1125 18:07:13.832433 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:07:13 crc kubenswrapper[4812]: E1125 18:07:13.834096 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:07:16 crc kubenswrapper[4812]: I1125 18:07:16.832843 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:07:16 crc kubenswrapper[4812]: E1125 18:07:16.833341 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:07:25 crc kubenswrapper[4812]: I1125 18:07:25.838118 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:07:25 crc kubenswrapper[4812]: E1125 18:07:25.838862 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:07:27 crc kubenswrapper[4812]: I1125 18:07:27.333389 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:07:27 crc kubenswrapper[4812]: I1125 18:07:27.333798 4812 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.743380 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-4jsp6/must-gather-c82df"] Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.744170 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="extract-content" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744183 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="extract-content" Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.744195 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="registry-server" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744201 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="registry-server" Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.744225 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="registry-server" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744232 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="registry-server" Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.744255 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="extract-content" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744260 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="extract-content" Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.744267 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="extract-utilities" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744274 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="extract-utilities" Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.744283 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="extract-utilities" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744288 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="extract-utilities" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744458 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="efd92a41-34d1-485e-b58a-02886bf88032" containerName="registry-server" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.744483 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="89c81428-b2ee-4ffd-8386-e35f6f08e5fb" containerName="registry-server" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.745576 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.752130 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-4jsp6"/"openshift-service-ca.crt" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.752178 4812 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-4jsp6"/"default-dockercfg-8k4tn" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.752430 4812 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-4jsp6"/"kube-root-ca.crt" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.763562 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-4jsp6/must-gather-c82df"] Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.832022 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:07:29 crc kubenswrapper[4812]: E1125 18:07:29.832305 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.874619 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-must-gather-output\") pod \"must-gather-c82df\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.875496 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5kwf\" (UniqueName: \"kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf\") pod \"must-gather-c82df\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.976932 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-must-gather-output\") pod \"must-gather-c82df\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.977197 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5kwf\" (UniqueName: \"kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf\") pod \"must-gather-c82df\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:29 crc kubenswrapper[4812]: I1125 18:07:29.978863 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-must-gather-output\") pod \"must-gather-c82df\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:30 crc kubenswrapper[4812]: I1125 18:07:30.007116 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5kwf\" 
(UniqueName: \"kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf\") pod \"must-gather-c82df\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:30 crc kubenswrapper[4812]: I1125 18:07:30.064256 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:30 crc kubenswrapper[4812]: W1125 18:07:30.606849 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a0c3e31_def2_4f23_b5c4_13a6d052c06e.slice/crio-29c6ee398948e625b926610a78eab70f5a6e2961fde3abd8124f47193dad6d15 WatchSource:0}: Error finding container 29c6ee398948e625b926610a78eab70f5a6e2961fde3abd8124f47193dad6d15: Status 404 returned error can't find the container with id 29c6ee398948e625b926610a78eab70f5a6e2961fde3abd8124f47193dad6d15 Nov 25 18:07:30 crc kubenswrapper[4812]: I1125 18:07:30.609691 4812 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 18:07:30 crc kubenswrapper[4812]: I1125 18:07:30.612496 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-4jsp6/must-gather-c82df"] Nov 25 18:07:30 crc kubenswrapper[4812]: I1125 18:07:30.988500 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4jsp6/must-gather-c82df" event={"ID":"7a0c3e31-def2-4f23-b5c4-13a6d052c06e","Type":"ContainerStarted","Data":"29c6ee398948e625b926610a78eab70f5a6e2961fde3abd8124f47193dad6d15"} Nov 25 18:07:37 crc kubenswrapper[4812]: I1125 18:07:37.832389 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:07:37 crc kubenswrapper[4812]: E1125 18:07:37.833386 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:07:41 crc kubenswrapper[4812]: I1125 18:07:41.763810 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="24d72c8f-af4a-4e0a-a148-0a5437c540b4" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 18:07:43 crc kubenswrapper[4812]: I1125 18:07:43.831332 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:07:43 crc kubenswrapper[4812]: E1125 18:07:43.831782 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:07:44 crc kubenswrapper[4812]: E1125 18:07:44.990134 4812 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-must-gather:latest" Nov 25 18:07:44 crc kubenswrapper[4812]: E1125 18:07:44.990288 4812 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 25 18:07:44 crc kubenswrapper[4812]: container 
&Container{Name:gather,Image:quay.io/openstack-k8s-operators/openstack-must-gather:latest,Command:[/bin/bash -c Nov 25 18:07:44 crc kubenswrapper[4812]: echo "[disk usage checker] Started" Nov 25 18:07:44 crc kubenswrapper[4812]: target_dir="/must-gather" Nov 25 18:07:44 crc kubenswrapper[4812]: usage_percentage_limit="70" Nov 25 18:07:44 crc kubenswrapper[4812]: while true; do Nov 25 18:07:44 crc kubenswrapper[4812]: usage_percentage=$(df -P "$target_dir" | awk 'NR==2 {print $5}' | sed 's/%//') Nov 25 18:07:44 crc kubenswrapper[4812]: echo "[disk usage checker] Volume usage percentage: current = ${usage_percentage} ; allowed = ${usage_percentage_limit}" Nov 25 18:07:44 crc kubenswrapper[4812]: if [ "$usage_percentage" -gt "$usage_percentage_limit" ]; then Nov 25 18:07:44 crc kubenswrapper[4812]: echo "[disk usage checker] Disk usage exceeds the volume percentage of ${usage_percentage_limit} for mounted directory, terminating..." Nov 25 18:07:44 crc kubenswrapper[4812]: ps -o sess --no-headers | sort -u | while read sid; do Nov 25 18:07:44 crc kubenswrapper[4812]: [[ "$sid" -eq "${$}" ]] && continue Nov 25 18:07:44 crc kubenswrapper[4812]: pkill --signal SIGKILL --session "$sid" Nov 25 18:07:44 crc kubenswrapper[4812]: done Nov 25 18:07:44 crc kubenswrapper[4812]: exit 1 Nov 25 18:07:44 crc kubenswrapper[4812]: fi Nov 25 18:07:44 crc kubenswrapper[4812]: sleep 5 Nov 25 18:07:44 crc kubenswrapper[4812]: done & setsid -w bash <<-MUSTGATHER_EOF Nov 25 18:07:44 crc kubenswrapper[4812]: ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all SOS_DECOMPRESS=0 gather Nov 25 18:07:44 crc kubenswrapper[4812]: MUSTGATHER_EOF Nov 25 18:07:44 crc kubenswrapper[4812]: sync && echo 'Caches written to disk'],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:must-gather-output,ReadOnly:false,MountPath:/must-gather,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d5kwf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod must-gather-c82df_openshift-must-gather-4jsp6(7a0c3e31-def2-4f23-b5c4-13a6d052c06e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Nov 25 18:07:44 crc kubenswrapper[4812]: > logger="UnhandledError" Nov 25 18:07:44 crc kubenswrapper[4812]: E1125 18:07:44.992823 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"gather\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context 
canceled\", failed to \"StartContainer\" for \"copy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\"]" pod="openshift-must-gather-4jsp6/must-gather-c82df" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" Nov 25 18:07:45 crc kubenswrapper[4812]: E1125 18:07:45.139450 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"gather\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\", failed to \"StartContainer\" for \"copy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\"]" pod="openshift-must-gather-4jsp6/must-gather-c82df" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" Nov 25 18:07:51 crc kubenswrapper[4812]: I1125 18:07:51.833286 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:07:51 crc kubenswrapper[4812]: E1125 18:07:51.834240 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:07:55 crc kubenswrapper[4812]: I1125 18:07:55.838257 4812 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-must-gather-4jsp6/must-gather-c82df" secret="" err="secret \"default-dockercfg-8k4tn\" not found" Nov 25 18:07:55 crc kubenswrapper[4812]: E1125 18:07:55.847686 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/kube-root-ca.crt: configmap "kube-root-ca.crt" not found Nov 25 18:07:55 crc kubenswrapper[4812]: E1125 18:07:55.848147 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/openshift-service-ca.crt: configmap "openshift-service-ca.crt" not found Nov 25 18:07:55 crc kubenswrapper[4812]: E1125 18:07:55.848236 4812 projected.go:194] Error preparing data for projected volume kube-api-access-d5kwf for pod openshift-must-gather-4jsp6/must-gather-c82df: [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:55 crc kubenswrapper[4812]: E1125 18:07:55.848349 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf podName:7a0c3e31-def2-4f23-b5c4-13a6d052c06e nodeName:}" failed. No retries permitted until 2025-11-25 18:07:56.348333775 +0000 UTC m=+4851.188475870 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d5kwf" (UniqueName: "kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf") pod "must-gather-c82df" (UID: "7a0c3e31-def2-4f23-b5c4-13a6d052c06e") : [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:55 crc kubenswrapper[4812]: I1125 18:07:55.863331 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-4jsp6/must-gather-c82df"] Nov 25 18:07:55 crc kubenswrapper[4812]: I1125 18:07:55.876170 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-4jsp6/must-gather-c82df"] Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.356005 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/kube-root-ca.crt: configmap "kube-root-ca.crt" not found Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.356054 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/openshift-service-ca.crt: configmap "openshift-service-ca.crt" not found Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.356069 4812 projected.go:194] Error preparing data for projected volume kube-api-access-d5kwf for pod openshift-must-gather-4jsp6/must-gather-c82df: [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.356158 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf podName:7a0c3e31-def2-4f23-b5c4-13a6d052c06e nodeName:}" failed. No retries permitted until 2025-11-25 18:07:57.356122529 +0000 UTC m=+4852.196264624 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-d5kwf" (UniqueName: "kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf") pod "must-gather-c82df" (UID: "7a0c3e31-def2-4f23-b5c4-13a6d052c06e") : [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.366169 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523740f30e77 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{gather},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openstack-k8s-operators/openstack-must-gather:latest\" in 471ms (471ms including waiting). 
Image size: 403880672 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:56.312030839 +0000 UTC m=+4851.152172924,LastTimestamp:2025-11-25 18:07:56.312030839 +0000 UTC m=+4851.152172924,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.427355 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b5237254f753f openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:,},Reason:FailedMount,Message:MountVolume.SetUp failed for volume \"kube-api-access-d5kwf\" : [configmap \"kube-root-ca.crt\" not found, configmap \"openshift-service-ca.crt\" not found],Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:55.848324415 +0000 UTC m=+4850.688466510,LastTimestamp:2025-11-25 18:07:56.356098938 +0000 UTC m=+4851.196241043,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.739286 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b52375729c0f3 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{gather},},Reason:Created,Message:Created container gather,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:56.684714227 +0000 UTC m=+4851.524856322,LastTimestamp:2025-11-25 18:07:56.684714227 +0000 UTC m=+4851.524856322,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.794021 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523757a99cc6 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{gather},},Reason:Started,Message:Started container gather,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:56.693093574 +0000 UTC m=+4851.533235669,LastTimestamp:2025-11-25 18:07:56.693093574 +0000 UTC m=+4851.533235669,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:56 crc kubenswrapper[4812]: I1125 18:07:56.831343 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.831577 4812 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:07:56 crc kubenswrapper[4812]: E1125 18:07:56.847741 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523757cc38b3 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{copy},},Reason:Pulled,Message:Container image \"quay.io/openstack-k8s-operators/openstack-must-gather:latest\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:56.695361715 +0000 UTC m=+4851.535503810,LastTimestamp:2025-11-25 18:07:56.695361715 +0000 UTC m=+4851.535503810,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.018760 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523767c07887 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{copy},},Reason:Created,Message:Created container copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:56.963027079 +0000 UTC m=+4851.803169174,LastTimestamp:2025-11-25 18:07:56.963027079 +0000 UTC m=+4851.803169174,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.073111 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523768de219c openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{copy},},Reason:Started,Message:Started container copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:56.981748124 +0000 UTC m=+4851.821890219,LastTimestamp:2025-11-25 18:07:56.981748124 +0000 UTC m=+4851.821890219,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:57 crc kubenswrapper[4812]: I1125 18:07:57.249716 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-4jsp6/must-gather-c82df" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="gather" containerID="cri-o://cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048" gracePeriod=2 Nov 25 18:07:57 crc kubenswrapper[4812]: I1125 18:07:57.250064 4812 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-4jsp6/must-gather-c82df" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="copy" containerID="cri-o://9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314" gracePeriod=2 Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.303936 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523778d6b8f4 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{gather},},Reason:Killing,Message:Stopping container gather,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:57.249698036 +0000 UTC m=+4852.089840131,LastTimestamp:2025-11-25 18:07:57.249698036 +0000 UTC m=+4852.089840131,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:57 crc kubenswrapper[4812]: I1125 18:07:57.332604 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:07:57 crc kubenswrapper[4812]: I1125 18:07:57.332680 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.357990 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b523778dc2905 openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:spec.containers{copy},},Reason:Killing,Message:Stopping container copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:57.250054405 +0000 UTC m=+4852.090196500,LastTimestamp:2025-11-25 18:07:57.250054405 +0000 UTC m=+4852.090196500,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.381282 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/kube-root-ca.crt: configmap "kube-root-ca.crt" not found Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.381322 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/openshift-service-ca.crt: configmap "openshift-service-ca.crt" not found Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.381338 4812 projected.go:194] Error preparing data for projected volume kube-api-access-d5kwf for pod openshift-must-gather-4jsp6/must-gather-c82df: [configmap "kube-root-ca.crt" not found, configmap 
"openshift-service-ca.crt" not found] Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.381392 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf podName:7a0c3e31-def2-4f23-b5c4-13a6d052c06e nodeName:}" failed. No retries permitted until 2025-11-25 18:07:59.38137309 +0000 UTC m=+4854.221515205 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-d5kwf" (UniqueName: "kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf") pod "must-gather-c82df" (UID: "7a0c3e31-def2-4f23-b5c4-13a6d052c06e") : [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:57 crc kubenswrapper[4812]: E1125 18:07:57.440141 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b5237254f753f openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:,},Reason:FailedMount,Message:MountVolume.SetUp failed for volume \"kube-api-access-d5kwf\" : [configmap \"kube-root-ca.crt\" not found, configmap \"openshift-service-ca.crt\" not found],Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:55.848324415 +0000 UTC m=+4850.688466510,LastTimestamp:2025-11-25 18:07:57.38136461 +0000 UTC m=+4852.221506715,Count:3,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:58 crc kubenswrapper[4812]: I1125 18:07:58.260493 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4jsp6_must-gather-c82df_7a0c3e31-def2-4f23-b5c4-13a6d052c06e/copy/0.log" Nov 25 18:07:58 crc kubenswrapper[4812]: I1125 18:07:58.261248 4812 generic.go:334] "Generic (PLEG): container finished" podID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerID="9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314" exitCode=143 Nov 25 18:07:59 crc kubenswrapper[4812]: E1125 18:07:59.423958 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/kube-root-ca.crt: configmap "kube-root-ca.crt" not found Nov 25 18:07:59 crc kubenswrapper[4812]: E1125 18:07:59.424905 4812 projected.go:288] Couldn't get configMap openshift-must-gather-4jsp6/openshift-service-ca.crt: configmap "openshift-service-ca.crt" not found Nov 25 18:07:59 crc kubenswrapper[4812]: E1125 18:07:59.424927 4812 projected.go:194] Error preparing data for projected volume kube-api-access-d5kwf for pod openshift-must-gather-4jsp6/must-gather-c82df: [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:59 crc kubenswrapper[4812]: E1125 18:07:59.425018 4812 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf podName:7a0c3e31-def2-4f23-b5c4-13a6d052c06e nodeName:}" failed. No retries permitted until 2025-11-25 18:08:03.424989584 +0000 UTC m=+4858.265131679 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-d5kwf" (UniqueName: "kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf") pod "must-gather-c82df" (UID: "7a0c3e31-def2-4f23-b5c4-13a6d052c06e") : [configmap "kube-root-ca.crt" not found, configmap "openshift-service-ca.crt" not found] Nov 25 18:07:59 crc kubenswrapper[4812]: E1125 18:07:59.489222 4812 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"openshift-must-gather-4jsp6\" not found" event="&Event{ObjectMeta:{must-gather-c82df.187b5237254f753f openshift-must-gather-4jsp6 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-must-gather-4jsp6,Name:must-gather-c82df,UID:7a0c3e31-def2-4f23-b5c4-13a6d052c06e,APIVersion:v1,ResourceVersion:73947,FieldPath:,},Reason:FailedMount,Message:MountVolume.SetUp failed for volume \"kube-api-access-d5kwf\" : [configmap \"kube-root-ca.crt\" not found, configmap \"openshift-service-ca.crt\" not found],Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 18:07:55.848324415 +0000 UTC m=+4850.688466510,LastTimestamp:2025-11-25 18:07:59.424957273 +0000 UTC m=+4854.265099368,Count:4,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.673737 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4jsp6_must-gather-c82df_7a0c3e31-def2-4f23-b5c4-13a6d052c06e/copy/0.log" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.674785 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4jsp6_must-gather-c82df_7a0c3e31-def2-4f23-b5c4-13a6d052c06e/gather/0.log" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.674888 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.754693 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-must-gather-output\") pod \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.754968 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5kwf\" (UniqueName: \"kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf\") pod \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\" (UID: \"7a0c3e31-def2-4f23-b5c4-13a6d052c06e\") " Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.755392 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "7a0c3e31-def2-4f23-b5c4-13a6d052c06e" (UID: "7a0c3e31-def2-4f23-b5c4-13a6d052c06e"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.755836 4812 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.762249 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf" (OuterVolumeSpecName: "kube-api-access-d5kwf") pod "7a0c3e31-def2-4f23-b5c4-13a6d052c06e" (UID: "7a0c3e31-def2-4f23-b5c4-13a6d052c06e"). InnerVolumeSpecName "kube-api-access-d5kwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.843202 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" path="/var/lib/kubelet/pods/7a0c3e31-def2-4f23-b5c4-13a6d052c06e/volumes" Nov 25 18:07:59 crc kubenswrapper[4812]: I1125 18:07:59.858747 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5kwf\" (UniqueName: \"kubernetes.io/projected/7a0c3e31-def2-4f23-b5c4-13a6d052c06e-kube-api-access-d5kwf\") on node \"crc\" DevicePath \"\"" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.293054 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4jsp6_must-gather-c82df_7a0c3e31-def2-4f23-b5c4-13a6d052c06e/copy/0.log" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.293469 4812 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4jsp6_must-gather-c82df_7a0c3e31-def2-4f23-b5c4-13a6d052c06e/gather/0.log" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.293527 4812 generic.go:334] "Generic (PLEG): container finished" podID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerID="cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048" exitCode=137 Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.293613 4812 scope.go:117] "RemoveContainer" containerID="9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.293814 4812 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4jsp6/must-gather-c82df" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.334446 4812 scope.go:117] "RemoveContainer" containerID="cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.369157 4812 scope.go:117] "RemoveContainer" containerID="9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314" Nov 25 18:08:00 crc kubenswrapper[4812]: E1125 18:08:00.369796 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314\": container with ID starting with 9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314 not found: ID does not exist" containerID="9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.369845 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314"} err="failed to get container status \"9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314\": rpc error: code = NotFound desc = could not find container \"9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314\": container with ID starting with 9ad13266decd54b9cc03443bd54c1158974f93c309eac671e0e4a0dd1161a314 not found: ID does not exist" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.369884 4812 scope.go:117] "RemoveContainer" containerID="cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048" Nov 25 18:08:00 crc kubenswrapper[4812]: E1125 18:08:00.370287 4812 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048\": container with ID starting with cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048 not found: ID does not exist" containerID="cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048" Nov 25 18:08:00 crc kubenswrapper[4812]: I1125 18:08:00.370321 4812 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048"} err="failed to get container status \"cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048\": rpc error: code = NotFound desc = could not find container \"cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048\": container with ID starting with cf64c89ab176b35d453d76d606017e4a2d478b594d1f55f84d96052d41fc6048 not found: ID does not exist" Nov 25 18:08:05 crc kubenswrapper[4812]: I1125 18:08:05.839384 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:08:05 crc kubenswrapper[4812]: E1125 18:08:05.840223 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:08:10 crc kubenswrapper[4812]: I1125 18:08:10.831638 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:08:10 crc kubenswrapper[4812]: E1125 
18:08:10.832369 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:08:20 crc kubenswrapper[4812]: I1125 18:08:20.832453 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:08:20 crc kubenswrapper[4812]: E1125 18:08:20.833327 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:08:25 crc kubenswrapper[4812]: I1125 18:08:25.842433 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:08:25 crc kubenswrapper[4812]: E1125 18:08:25.843343 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.332969 4812 patch_prober.go:28] interesting pod/machine-config-daemon-lcgpx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.333292 4812 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.333345 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.334214 4812 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1ac1e241c603a13ec9553ce63660deb35c2c2405ad96fbe133a2b2439aa7001e"} pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.334294 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" podUID="8ed911cf-2139-4b12-84ba-af635585ba29" containerName="machine-config-daemon" containerID="cri-o://1ac1e241c603a13ec9553ce63660deb35c2c2405ad96fbe133a2b2439aa7001e" gracePeriod=600 Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.486133 4812 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7tbnf"] Nov 25 18:08:27 crc kubenswrapper[4812]: E1125 18:08:27.487216 4812 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="gather" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.487240 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="gather" Nov 25 18:08:27 crc kubenswrapper[4812]: E1125 18:08:27.487282 4812 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="copy" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.487291 4812 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="copy" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.487668 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="copy" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.487707 4812 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a0c3e31-def2-4f23-b5c4-13a6d052c06e" containerName="gather" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.489746 4812 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.508342 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tbnf"] Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.564923 4812 generic.go:334] "Generic (PLEG): container finished" podID="8ed911cf-2139-4b12-84ba-af635585ba29" containerID="1ac1e241c603a13ec9553ce63660deb35c2c2405ad96fbe133a2b2439aa7001e" exitCode=0 Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.564976 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerDied","Data":"1ac1e241c603a13ec9553ce63660deb35c2c2405ad96fbe133a2b2439aa7001e"} Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.565018 4812 scope.go:117] "RemoveContainer" containerID="9af0f3138c22c859714afa98ee332470e15d86804218511d7c6509c67e1f94e4" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.570046 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-catalog-content\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.570148 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-utilities\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.570256 4812 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7g85\" (UniqueName: \"kubernetes.io/projected/79e9720a-72b6-482c-baf4-1553852c608a-kube-api-access-s7g85\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.672089 4812 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-catalog-content\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.672199 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-utilities\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.672284 4812 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7g85\" (UniqueName: \"kubernetes.io/projected/79e9720a-72b6-482c-baf4-1553852c608a-kube-api-access-s7g85\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.672560 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-catalog-content\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.672730 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-utilities\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.693260 4812 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7g85\" (UniqueName: \"kubernetes.io/projected/79e9720a-72b6-482c-baf4-1553852c608a-kube-api-access-s7g85\") pod \"community-operators-7tbnf\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:27 crc kubenswrapper[4812]: I1125 18:08:27.824091 4812 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:28 crc kubenswrapper[4812]: W1125 18:08:28.307838 4812 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79e9720a_72b6_482c_baf4_1553852c608a.slice/crio-e2589d778d11319979378b15195d4b095eda179a7494385c8568817e88e5976a WatchSource:0}: Error finding container e2589d778d11319979378b15195d4b095eda179a7494385c8568817e88e5976a: Status 404 returned error can't find the container with id e2589d778d11319979378b15195d4b095eda179a7494385c8568817e88e5976a Nov 25 18:08:28 crc kubenswrapper[4812]: I1125 18:08:28.310635 4812 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tbnf"] Nov 25 18:08:28 crc kubenswrapper[4812]: I1125 18:08:28.579037 4812 generic.go:334] "Generic (PLEG): container finished" podID="79e9720a-72b6-482c-baf4-1553852c608a" containerID="f0bd97b4a9605d61e290c79bbb038a7ac3f8c04a1d77d6f4ce0bc07c860faa37" exitCode=0 Nov 25 18:08:28 crc kubenswrapper[4812]: I1125 18:08:28.579185 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerDied","Data":"f0bd97b4a9605d61e290c79bbb038a7ac3f8c04a1d77d6f4ce0bc07c860faa37"} Nov 25 18:08:28 crc kubenswrapper[4812]: I1125 18:08:28.579695 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerStarted","Data":"e2589d778d11319979378b15195d4b095eda179a7494385c8568817e88e5976a"} Nov 25 18:08:28 crc kubenswrapper[4812]: I1125 18:08:28.584713 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lcgpx" event={"ID":"8ed911cf-2139-4b12-84ba-af635585ba29","Type":"ContainerStarted","Data":"2257a4721090e8cdb1bbaad04afdc93bb8ae14cdb19c28dfa1ffce1acc4405a3"} Nov 25 18:08:29 crc kubenswrapper[4812]: I1125 18:08:29.595885 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerStarted","Data":"b54f2fb781ec90acecf1e0574cb32733751a623154cc5942cd75e01147aae108"} Nov 25 18:08:30 crc kubenswrapper[4812]: I1125 18:08:30.609564 4812 generic.go:334] "Generic (PLEG): container finished" podID="79e9720a-72b6-482c-baf4-1553852c608a" containerID="b54f2fb781ec90acecf1e0574cb32733751a623154cc5942cd75e01147aae108" exitCode=0 Nov 25 18:08:30 crc kubenswrapper[4812]: I1125 18:08:30.609668 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerDied","Data":"b54f2fb781ec90acecf1e0574cb32733751a623154cc5942cd75e01147aae108"} Nov 25 18:08:31 crc kubenswrapper[4812]: I1125 18:08:31.620948 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerStarted","Data":"1f281be1fa85debf0cc87b98f4017b618dad8b3e1f3a362b16a9631dfaf1912a"} Nov 25 18:08:31 crc kubenswrapper[4812]: I1125 18:08:31.649231 4812 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7tbnf" podStartSLOduration=2.189754376 podStartE2EDuration="4.649214354s" podCreationTimestamp="2025-11-25 18:08:27 
+0000 UTC" firstStartedPulling="2025-11-25 18:08:28.582725483 +0000 UTC m=+4883.422867588" lastFinishedPulling="2025-11-25 18:08:31.042185471 +0000 UTC m=+4885.882327566" observedRunningTime="2025-11-25 18:08:31.642169764 +0000 UTC m=+4886.482311849" watchObservedRunningTime="2025-11-25 18:08:31.649214354 +0000 UTC m=+4886.489356449" Nov 25 18:08:33 crc kubenswrapper[4812]: I1125 18:08:33.832309 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:08:33 crc kubenswrapper[4812]: E1125 18:08:33.833372 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:08:37 crc kubenswrapper[4812]: I1125 18:08:37.824568 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:37 crc kubenswrapper[4812]: I1125 18:08:37.825269 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:37 crc kubenswrapper[4812]: I1125 18:08:37.888382 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:38 crc kubenswrapper[4812]: I1125 18:08:38.781206 4812 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:38 crc kubenswrapper[4812]: I1125 18:08:38.837312 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tbnf"] Nov 25 18:08:40 crc kubenswrapper[4812]: I1125 18:08:40.732002 4812 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7tbnf" podUID="79e9720a-72b6-482c-baf4-1553852c608a" containerName="registry-server" containerID="cri-o://1f281be1fa85debf0cc87b98f4017b618dad8b3e1f3a362b16a9631dfaf1912a" gracePeriod=2 Nov 25 18:08:40 crc kubenswrapper[4812]: I1125 18:08:40.832131 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:08:40 crc kubenswrapper[4812]: E1125 18:08:40.832598 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.742860 4812 generic.go:334] "Generic (PLEG): container finished" podID="79e9720a-72b6-482c-baf4-1553852c608a" containerID="1f281be1fa85debf0cc87b98f4017b618dad8b3e1f3a362b16a9631dfaf1912a" exitCode=0 Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.742936 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerDied","Data":"1f281be1fa85debf0cc87b98f4017b618dad8b3e1f3a362b16a9631dfaf1912a"} Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.743434 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-7tbnf" event={"ID":"79e9720a-72b6-482c-baf4-1553852c608a","Type":"ContainerDied","Data":"e2589d778d11319979378b15195d4b095eda179a7494385c8568817e88e5976a"} Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.743454 4812 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2589d778d11319979378b15195d4b095eda179a7494385c8568817e88e5976a" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.767519 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.894771 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-utilities\") pod \"79e9720a-72b6-482c-baf4-1553852c608a\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.894843 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-catalog-content\") pod \"79e9720a-72b6-482c-baf4-1553852c608a\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.895080 4812 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7g85\" (UniqueName: \"kubernetes.io/projected/79e9720a-72b6-482c-baf4-1553852c608a-kube-api-access-s7g85\") pod \"79e9720a-72b6-482c-baf4-1553852c608a\" (UID: \"79e9720a-72b6-482c-baf4-1553852c608a\") " Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.896474 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-utilities" (OuterVolumeSpecName: "utilities") pod "79e9720a-72b6-482c-baf4-1553852c608a" (UID: "79e9720a-72b6-482c-baf4-1553852c608a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.905860 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79e9720a-72b6-482c-baf4-1553852c608a-kube-api-access-s7g85" (OuterVolumeSpecName: "kube-api-access-s7g85") pod "79e9720a-72b6-482c-baf4-1553852c608a" (UID: "79e9720a-72b6-482c-baf4-1553852c608a"). InnerVolumeSpecName "kube-api-access-s7g85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.943017 4812 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79e9720a-72b6-482c-baf4-1553852c608a" (UID: "79e9720a-72b6-482c-baf4-1553852c608a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.998593 4812 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.998631 4812 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7g85\" (UniqueName: \"kubernetes.io/projected/79e9720a-72b6-482c-baf4-1553852c608a-kube-api-access-s7g85\") on node \"crc\" DevicePath \"\"" Nov 25 18:08:41 crc kubenswrapper[4812]: I1125 18:08:41.998650 4812 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e9720a-72b6-482c-baf4-1553852c608a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 18:08:42 crc kubenswrapper[4812]: I1125 18:08:42.754516 4812 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tbnf" Nov 25 18:08:42 crc kubenswrapper[4812]: I1125 18:08:42.823243 4812 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tbnf"] Nov 25 18:08:42 crc kubenswrapper[4812]: I1125 18:08:42.832212 4812 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7tbnf"] Nov 25 18:08:43 crc kubenswrapper[4812]: I1125 18:08:43.844315 4812 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79e9720a-72b6-482c-baf4-1553852c608a" path="/var/lib/kubelet/pods/79e9720a-72b6-482c-baf4-1553852c608a/volumes" Nov 25 18:08:47 crc kubenswrapper[4812]: I1125 18:08:47.832886 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:08:47 crc kubenswrapper[4812]: E1125 18:08:47.833788 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:08:54 crc kubenswrapper[4812]: I1125 18:08:54.831921 4812 scope.go:117] "RemoveContainer" containerID="486e8f4338034b71187830cbe7a8041f996992d1857e8e19804e6b6a2e9d5ee8" Nov 25 18:08:54 crc kubenswrapper[4812]: E1125 18:08:54.832643 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-api\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-api pod=manila-api-0_openstack(6d2f6681-d1d6-45e9-b0f4-65209caf0069)\"" pod="openstack/manila-api-0" podUID="6d2f6681-d1d6-45e9-b0f4-65209caf0069" Nov 25 18:09:00 crc kubenswrapper[4812]: I1125 18:09:00.831666 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:09:01 crc kubenswrapper[4812]: I1125 18:09:01.965517 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerStarted","Data":"eb96af487432ed76ef7d1b655926d6bde356e9e2126ac08add98ec04be45121f"} Nov 25 18:09:02 crc kubenswrapper[4812]: I1125 18:09:02.979050 4812 generic.go:334] "Generic (PLEG): container finished" podID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" 
containerID="eb96af487432ed76ef7d1b655926d6bde356e9e2126ac08add98ec04be45121f" exitCode=1 Nov 25 18:09:02 crc kubenswrapper[4812]: I1125 18:09:02.979273 4812 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"8af2d4e3-790f-4ab4-92e8-1c0a083b9531","Type":"ContainerDied","Data":"eb96af487432ed76ef7d1b655926d6bde356e9e2126ac08add98ec04be45121f"} Nov 25 18:09:02 crc kubenswrapper[4812]: I1125 18:09:02.979427 4812 scope.go:117] "RemoveContainer" containerID="ecc0184183ac35ed28d3bf0287ae00e97beefc4fd633395d2c10f1b0faea066b" Nov 25 18:09:02 crc kubenswrapper[4812]: I1125 18:09:02.980098 4812 scope.go:117] "RemoveContainer" containerID="eb96af487432ed76ef7d1b655926d6bde356e9e2126ac08add98ec04be45121f" Nov 25 18:09:02 crc kubenswrapper[4812]: E1125 18:09:02.980355 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" Nov 25 18:09:05 crc kubenswrapper[4812]: I1125 18:09:05.164622 4812 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 18:09:05 crc kubenswrapper[4812]: I1125 18:09:05.165309 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 18:09:05 crc kubenswrapper[4812]: I1125 18:09:05.165330 4812 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 25 18:09:05 crc kubenswrapper[4812]: I1125 18:09:05.166139 4812 scope.go:117] "RemoveContainer" containerID="eb96af487432ed76ef7d1b655926d6bde356e9e2126ac08add98ec04be45121f" Nov 25 18:09:05 crc kubenswrapper[4812]: E1125 18:09:05.166627 4812 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-share\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=manila-share pod=manila-share-share1-0_openstack(8af2d4e3-790f-4ab4-92e8-1c0a083b9531)\"" pod="openstack/manila-share-share1-0" podUID="8af2d4e3-790f-4ab4-92e8-1c0a083b9531" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111370310024435 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111370310017352 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111356221016502 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111356221015452 5ustar corecore